From b7c249c9e468e9e385b6b828217eb3d225d24287 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 21 Jul 2023 18:30:53 +0000 Subject: [PATCH 001/270] Amazon WorkSpaces Update: Fixed VolumeEncryptionKey descriptions --- .changes/next-release/feature-AmazonWorkSpaces-c6aa83e.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 .changes/next-release/feature-AmazonWorkSpaces-c6aa83e.json diff --git a/.changes/next-release/feature-AmazonWorkSpaces-c6aa83e.json b/.changes/next-release/feature-AmazonWorkSpaces-c6aa83e.json new file mode 100644 index 000000000000..ca9f1a9d7dc4 --- /dev/null +++ b/.changes/next-release/feature-AmazonWorkSpaces-c6aa83e.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon WorkSpaces", + "contributor": "", + "description": "Fixed VolumeEncryptionKey descriptions" +} diff --git a/services/workspaces/src/main/resources/codegen-resources/service-2.json b/services/workspaces/src/main/resources/codegen-resources/service-2.json index 60518a9a905a..3f36aa5bb45e 100644 --- a/services/workspaces/src/main/resources/codegen-resources/service-2.json +++ b/services/workspaces/src/main/resources/codegen-resources/service-2.json @@ -4162,7 +4162,7 @@ }, "VolumeEncryptionKey":{ "shape":"VolumeEncryptionKey", - "documentation":"
The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
" + "documentation":"The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
" }, "UserVolumeEncryptionEnabled":{ "shape":"BooleanObject", @@ -4621,7 +4621,7 @@ }, "VolumeEncryptionKey":{ "shape":"VolumeEncryptionKey", - "documentation":"The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
" + "documentation":"The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
" }, "UserVolumeEncryptionEnabled":{ "shape":"BooleanObject", From c59f775245e6f7c0d3461e2175dc8d00aa0d97da Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 21 Jul 2023 18:30:54 +0000 Subject: [PATCH 002/270] Amazon Relational Database Service Update: Adds support for the DBSystemID parameter of CreateDBInstance to RDS Custom for Oracle. --- ...azonRelationalDatabaseService-845082c.json | 6 +++ .../codegen-resources/service-2.json | 52 +++++++++++-------- 2 files changed, 36 insertions(+), 22 deletions(-) create mode 100644 .changes/next-release/feature-AmazonRelationalDatabaseService-845082c.json diff --git a/.changes/next-release/feature-AmazonRelationalDatabaseService-845082c.json b/.changes/next-release/feature-AmazonRelationalDatabaseService-845082c.json new file mode 100644 index 000000000000..1c0e54a399f6 --- /dev/null +++ b/.changes/next-release/feature-AmazonRelationalDatabaseService-845082c.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adds support for the DBSystemID parameter of CreateDBInstance to RDS Custom for Oracle." +} diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index d2b0ff2172a0..cdef596c9235 100644 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -638,7 +638,7 @@ {"shape":"BlueGreenDeploymentNotFoundFault"}, {"shape":"InvalidBlueGreenDeploymentStateFault"} ], - "documentation":"Deletes a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" + "documentation":"Deletes a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" }, "DeleteCustomDBEngineVersion":{ "name":"DeleteCustomDBEngineVersion", @@ -947,7 +947,7 @@ "errors":[ {"shape":"BlueGreenDeploymentNotFoundFault"} ], - "documentation":"Returns information about blue/green deployments.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" + "documentation":"Describes one or more blue/green deployments.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" }, "DescribeCertificates":{ "name":"DescribeCertificates", @@ -2620,7 +2620,7 @@ {"shape":"BlueGreenDeploymentNotFoundFault"}, {"shape":"InvalidBlueGreenDeploymentStateFault"} ], - "documentation":"Switches over a blue/green deployment.
Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" + "documentation":"Switches over a blue/green deployment.
Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" }, "SwitchoverReadReplica":{ "name":"SwitchoverReadReplica", @@ -3021,7 +3021,7 @@ "members":{ "BlueGreenDeploymentIdentifier":{ "shape":"BlueGreenDeploymentIdentifier", - "documentation":"The system-generated identifier of the blue/green deployment.
" + "documentation":"The unique identifier of the blue/green deployment.
" }, "BlueGreenDeploymentName":{ "shape":"BlueGreenDeploymentName", @@ -3045,7 +3045,7 @@ }, "Status":{ "shape":"BlueGreenDeploymentStatus", - "documentation":"The status of the blue/green deployment.
Values:
PROVISIONING - Resources are being created in the green environment.
AVAILABLE - Resources are available in the green environment.
SWITCHOVER_IN_PROGRESS - The deployment is being switched from the blue environment to the green environment.
SWITCHOVER_COMPLETED - Switchover from the blue environment to the green environment is complete.
INVALID_CONFIGURATION - Resources in the green environment are invalid, so switchover isn't possible.
SWITCHOVER_FAILED - Switchover was attempted but failed.
DELETING - The blue/green deployment is being deleted.
The status of the blue/green deployment.
Valid Values:
PROVISIONING - Resources are being created in the green environment.
AVAILABLE - Resources are available in the green environment.
SWITCHOVER_IN_PROGRESS - The deployment is being switched from the blue environment to the green environment.
SWITCHOVER_COMPLETED - Switchover from the blue environment to the green environment is complete.
INVALID_CONFIGURATION - Resources in the green environment are invalid, so switchover isn't possible.
SWITCHOVER_FAILED - Switchover was attempted but failed.
DELETING - The blue/green deployment is being deleted.
Specifies the time when the blue/green deployment was created, in Universal Coordinated Time (UTC).
" + "documentation":"The time when the blue/green deployment was created, in Universal Coordinated Time (UTC).
" }, "DeleteTime":{ "shape":"TStamp", - "documentation":"Specifies the time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).
" + "documentation":"The time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).
" }, "TagList":{"shape":"TagList"} }, - "documentation":"Contains the details about a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" + "documentation":"Details about a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" }, "BlueGreenDeploymentAlreadyExistsFault":{ "type":"structure", @@ -3114,10 +3114,10 @@ }, "Status":{ "shape":"BlueGreenDeploymentTaskStatus", - "documentation":"The status of the blue/green deployment task.
Values:
PENDING - The resources are being prepared for deployment.
IN_PROGRESS - The resource is being deployed.
COMPLETED - The resource has been deployed.
FAILED - Deployment of the resource failed.
The status of the blue/green deployment task.
Valid Values:
PENDING - The resource is being prepared for deployment.
IN_PROGRESS - The resource is being deployed.
COMPLETED - The resource has been deployed.
FAILED - Deployment of the resource failed.
Contains the details about a task for a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" + "documentation":"Details about a task for a blue/green deployment.
For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
" }, "BlueGreenDeploymentTaskList":{ "type":"list", @@ -3943,7 +3943,7 @@ "members":{ "DBName":{ "shape":"String", - "documentation":"The meaning of this parameter differs depending on the database engine.
The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster.
Constraints:
Must contain 1 to 64 alphanumeric characters.
Can't be a word reserved by the database engine.
The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created.
Default: postgres
Constraints:
Must contain 1 to 63 alphanumeric characters.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created RDS Custom DB instance.
Default: ORCL
Constraints:
Must contain 1 to 8 alphanumeric characters.
Must contain a letter.
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
The Oracle System ID (SID) of the created DB instance.
Default: ORCL
Constraints:
Can't be longer than 8 characters.
Can't be a word reserved by the database engine, such as the string NULL.
The name of the database to create when the DB instance is created.
Default: postgres
Constraints:
Must contain 1 to 63 letters, numbers, or underscores.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the database engine.
Not applicable. Must be null.
The meaning of this parameter differs according to the database engine you use.
MySQL
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine
MariaDB
The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.
Constraints:
Must contain 1 to 64 letters or numbers.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine
PostgreSQL
The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance.
Constraints:
Must contain 1 to 63 letters, numbers, or underscores.
Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
Can't be a word reserved by the specified database engine
Oracle
The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName.
Default: ORCL
Constraints:
Can't be longer than 8 characters
Amazon RDS Custom for Oracle
The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs.
Default: ORCL
Constraints:
It must contain 1 to 8 alphanumeric characters.
It must contain a letter.
It can't be a word reserved by the database engine.
Amazon RDS Custom for SQL Server
Not applicable. Must be null.
SQL Server
Not applicable. Must be null.
Amazon Aurora MySQL
The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.
Constraints:
It must contain 1 to 64 alphanumeric characters.
It can't be a word reserved by the database engine.
Amazon Aurora PostgreSQL
The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster.
Constraints:
It must contain 1 to 63 alphanumeric characters.
It must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).
It can't be a word reserved by the database engine.
The CA certificate identifier to use for the DB instance's server certificate.
This setting doesn't apply to RDS Custom DB instances.
For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
" + }, + "DBSystemId":{ + "shape":"String", + "documentation":"The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. In this context, the term \"Oracle database instance\" refers exclusively to the system global area (SGA) and Oracle background processes. If you don't specify a SID, the value defaults to RDSCDB. The Oracle SID is also the name of your CDB.
The meaning of this parameter differs depending on the database engine.
For RDS for MariaDB, Microsoft SQL Server, MySQL, and PostgreSQL - The name of the initial database specified for this DB instance when it was created, if one was provided. This same name is returned for the life of the DB instance.
For RDS for Oracle - The Oracle System ID (SID) of the created DB instance. This value is only returned when the object returned is an Oracle DB instance.
Contains the initial database name that you provided (if required) when you created the DB instance. This name is returned for the life of your DB instance. For an RDS for Oracle CDB instance, the name identifies the PDB rather than the CDB.
" }, "Endpoint":{ "shape":"Endpoint", @@ -7245,6 +7249,10 @@ "StorageThroughput":{ "shape":"IntegerOptional", "documentation":"Specifies the storage throughput for the DB snapshot.
" + }, + "DBSystemId":{ + "shape":"String", + "documentation":"The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. The Oracle SID is also the name of your CDB.
" } }, "documentation":"Contains the details of an Amazon RDS DB snapshot.
This data type is used as a response element in the DescribeDBSnapshots action.
The blue/green deployment identifier of the deployment to be deleted. This parameter isn't case-sensitive.
Constraints:
Must match an existing blue/green deployment identifier.
The unique identifier of the blue/green deployment to delete. This parameter isn't case-sensitive.
Constraints:
Must match an existing blue/green deployment identifier.
A value that indicates whether to delete the resources in the green environment. You can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED.
Specifies whether to delete the resources in the green environment. You can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED.
The blue/green deployment identifier. If this parameter is specified, information from only the specific blue/green deployment is returned. This parameter isn't case-sensitive.
Constraints:
If supplied, must match an existing blue/green deployment identifier.
The blue/green deployment identifier. If you specify this parameter, the response only includes information about the specific blue/green deployment. This parameter isn't case-sensitive.
Constraints:
Must match an existing blue/green deployment identifier.
A filter that specifies one or more blue/green deployments to describe.
Supported filters:
blue-green-deployment-identifier - Accepts system-generated identifiers for blue/green deployments. The results list only includes information about the blue/green deployments with the specified identifiers.
blue-green-deployment-name - Accepts user-supplied names for blue/green deployments. The results list only includes information about the blue/green deployments with the specified names.
source - Accepts source databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified source databases.
target - Accepts target databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified target databases.
A filter that specifies one or more blue/green deployments to describe.
Valid Values:
blue-green-deployment-identifier - Accepts system-generated identifiers for blue/green deployments. The results list only includes information about the blue/green deployments with the specified identifiers.
blue-green-deployment-name - Accepts user-supplied names for blue/green deployments. The results list only includes information about the blue/green deployments with the specified names.
source - Accepts source databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified source databases.
target - Accepts target databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified target databases.
An optional pagination token provided by a previous DescribeBlueGreenDeployments request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
An optional pagination token provided by a previous DescribeBlueGreenDeployments request. If you specify this parameter, the response only includes records beyond the marker, up to the value specified by MaxRecords.
The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints: Minimum 20, maximum 100.
" + "documentation":"The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
Constraints:
Must be a minimum of 20.
Can't exceed 100.
Contains a list of blue/green deployments for the user.
" + "documentation":"A list of blue/green deployments in the current account and Amazon Web Services Region.
" }, "Marker":{ "shape":"String", - "documentation":"A pagination token that can be used in a later DescribeBlueGreenDeployments request.
" + "documentation":"A pagination token that can be used in a later DescribeBlueGreenDeployments request.
The blue/green deployment identifier.
Constraints:
Must match an existing blue/green deployment identifier.
The unique identifier of the blue/green deployment.
Constraints:
Must match an existing blue/green deployment identifier.
The amount of time, in seconds, for the switchover to complete. The default is 300.
If the switchover takes longer than the specified duration, then any changes are rolled back, and no changes are made to the environments.
" + "documentation":"The amount of time, in seconds, for the switchover to complete.
Default: 300
If the switchover takes longer than the specified duration, then any changes are rolled back, and no changes are made to the environments.
" } } }, From f902b5aec497d922c8b863aeac1b35f18c8848de Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 21 Jul 2023 18:30:58 +0000 Subject: [PATCH 003/270] AWS Elemental MediaConvert Update: This release includes improvements to Preserve 444 handling, compatibility of HEVC sources without frame rates, and general improvements to MP4 outputs. --- .../feature-AWSElementalMediaConvert-b6afdfc.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 8 ++++---- 2 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 .changes/next-release/feature-AWSElementalMediaConvert-b6afdfc.json diff --git a/.changes/next-release/feature-AWSElementalMediaConvert-b6afdfc.json b/.changes/next-release/feature-AWSElementalMediaConvert-b6afdfc.json new file mode 100644 index 000000000000..fb7244b64ab5 --- /dev/null +++ b/.changes/next-release/feature-AWSElementalMediaConvert-b6afdfc.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes improvements to Preserve 444 handling, compatibility of HEVC sources without frame rates, and general improvements to MP4 outputs." +} diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index 0b7a288c5667..b8433f9290e5 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -6919,7 +6919,7 @@ "SegmentsPerSubdirectory": { "shape": "__integerMin1Max2147483647", "locationName": "segmentsPerSubdirectory", - "documentation": "Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect." + "documentation": "Specify the number of segments to write to a subdirectory before starting a new one. 
You must also set Directory structure to Subdirectory per stream for this setting to have an effect." }, "StreamInfResolution": { "shape": "HlsStreamInfResolution", @@ -10614,7 +10614,7 @@ }, "ProresChromaSampling": { "type": "string", - "documentation": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose an output codec profile that supports 4:4:4 chroma sampling. These values for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all video preprocessors except for Nexguard file marker (PartnerWatermarking). When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) to Drop duplicate (DUPLICATE_DROP).", + "documentation": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. 
Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer.", "enum": [ "PRESERVE_444_SAMPLING", "SUBSAMPLE_TO_422" @@ -10682,7 +10682,7 @@ "ChromaSampling": { "shape": "ProresChromaSampling", "locationName": "chromaSampling", - "documentation": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose an output codec profile that supports 4:4:4 chroma sampling. These values for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all video preprocessors except for Nexguard file marker (PartnerWatermarking). When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) to Drop duplicate (DUPLICATE_DROP)." + "documentation": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. 
Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer." }, "CodecProfile": { "shape": "ProresCodecProfile", @@ -11832,7 +11832,7 @@ "Codec": { "shape": "VideoCodec", "locationName": "codec", - "documentation": "Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV." + "documentation": "Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV." }, "FrameCaptureSettings": { "shape": "FrameCaptureSettings", From 8331e90ffda69f7d0776cc71a6b39f1fa51e89a2 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 21 Jul 2023 18:31:06 +0000 Subject: [PATCH 004/270] AWS Glue Update: This release adds support for AWS Glue Crawler with Apache Hudi Tables, allowing Crawlers to discover Hudi Tables in S3 and register them in Glue Data Catalog for query engines to query against. 
--- .../next-release/feature-AWSGlue-52354c5.json | 6 +++ .../codegen-resources/service-2.json | 42 ++++++++++++++++--- 2 files changed, 42 insertions(+), 6 deletions(-) create mode 100644 .changes/next-release/feature-AWSGlue-52354c5.json diff --git a/.changes/next-release/feature-AWSGlue-52354c5.json b/.changes/next-release/feature-AWSGlue-52354c5.json new file mode 100644 index 000000000000..00fa4b8f9dd0 --- /dev/null +++ b/.changes/next-release/feature-AWSGlue-52354c5.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "This release adds support for AWS Glue Crawler with Apache Hudi Tables, allowing Crawlers to discover Hudi Tables in S3 and register them in Glue Data Catalog for query engines to query against." +} diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 354d9e8d9807..d090b8b82afd 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -6314,6 +6314,10 @@ "IcebergTargets":{ "shape":"IcebergTargetList", "documentation":"Specifies Apache Iceberg data store targets.
" + }, + "HudiTargets":{ + "shape":"HudiTargetList", + "documentation":"Specifies Apache Hudi data store targets.
" } }, "documentation":"Specifies data stores to crawl.
" @@ -6901,7 +6905,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated to use for the session. Accepts a value of Standard, G.1X, G.2X, or G.025X.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.
    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
    An array of Amazon S3 location strings for Hudi, each indicating the root folder in which the metadata files for a Hudi table reside. The Hudi folder may be located in a child folder of the root folder.
    
The crawler will scan all folders underneath a path for a Hudi folder.
" + }, + "ConnectionName":{ + "shape":"ConnectionName", + "documentation":"The name of the connection to use to connect to the Hudi target. If your Hudi files are stored in buckets that require VPC authorization, you can set their connection properties here.
" + }, + "Exclusions":{ + "shape":"PathList", + "documentation":"A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.
" + }, + "MaximumTraversalDepth":{ + "shape":"NullableInteger", + "documentation":"The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time.
" + } + }, + "documentation":"Specifies an Apache Hudi data source.
" + }, "HudiTargetCompressionType":{ "type":"string", "enum":[ @@ -12663,6 +12689,10 @@ "snappy" ] }, + "HudiTargetList":{ + "type":"list", + "member":{"shape":"HudiTarget"} + }, "IcebergInput":{ "type":"structure", "required":["MetadataOperation"], @@ -13119,7 +13149,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
    For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
    For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
    
For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
The position of the ActiveSpeakerOnly video tile.
Defines the configuration for an ActiveSpeakerOnly video tile.
If true, TranscriptEvents with IsPartial: true are filtered out of the insights target.
Turns language identification on or off.
" + }, + "LanguageOptions":{ + "shape":"LanguageOptions", + "documentation":"The language options for the transcription, such as automatic language detection.
" + }, + "PreferredLanguage":{ + "shape":"CallAnalyticsLanguageCode", + "documentation":"The preferred language for the transcription.
" + }, + "VocabularyNames":{ + "shape":"VocabularyNames", + "documentation":"The names of the custom vocabulary or vocabularies used during transcription.
" + }, + "VocabularyFilterNames":{ + "shape":"VocabularyFilterNames", + "documentation":"The names of the custom vocabulary filter or filters using during transcription.
" } }, "documentation":"A structure that contains the configuration settings for an Amazon Transcribe processor.
" @@ -670,6 +708,22 @@ "exception":true }, "Boolean":{"type":"boolean"}, + "BorderColor":{ + "type":"string", + "enum":[ + "Black", + "Blue", + "Red", + "Green", + "White", + "Yellow" + ] + }, + "BorderThickness":{ + "type":"integer", + "max":20, + "min":1 + }, "CallAnalyticsLanguageCode":{ "type":"string", "enum":[ @@ -684,6 +738,13 @@ "pt-BR" ] }, + "CanvasOrientation":{ + "type":"string", + "enum":[ + "Landscape", + "Portrait" + ] + }, "CategoryName":{ "type":"string", "max":200, @@ -922,13 +983,19 @@ "enum":[ "PresenterOnly", "Horizontal", - "Vertical" + "Vertical", + "ActiveSpeakerOnly" ] }, "ContentType":{ "type":"string", "enum":["PII"] }, + "CornerRadius":{ + "type":"integer", + "max":20, + "min":1 + }, "CreateMediaCapturePipelineRequest":{ "type":"structure", "required":[ @@ -1079,7 +1146,7 @@ }, "S3RecordingSinkRuntimeConfiguration":{ "shape":"S3RecordingSinkRuntimeConfiguration", - "documentation":"The runtime configuration for the S3 recording sink.
" + "documentation":"The runtime configuration for the S3 recording sink. If specified, the settings in this structure override any settings in S3RecordingSinkConfiguration.
Defines the configuration options for a presenter only video tile.
" + }, + "ActiveSpeakerOnlyConfiguration":{ + "shape":"ActiveSpeakerOnlyConfiguration", + "documentation":"The configuration settings for an ActiveSpeakerOnly video tile.
The configuration settings for a horizontal layout.
" + }, + "VerticalLayoutConfiguration":{ + "shape":"VerticalLayoutConfiguration", + "documentation":"The configuration settings for a vertical layout.
" + }, + "VideoAttribute":{ + "shape":"VideoAttribute", + "documentation":"The attribute settings for the video tiles.
" + }, + "CanvasOrientation":{ + "shape":"CanvasOrientation", + "documentation":"The orientation setting, horizontal or vertical.
" } }, "documentation":"Specifies the type of grid layout.
" @@ -1336,6 +1423,46 @@ "min":36, "pattern":"[a-fA-F0-9]{8}(?:-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}" }, + "HighlightColor":{ + "type":"string", + "enum":[ + "Black", + "Blue", + "Red", + "Green", + "White", + "Yellow" + ] + }, + "HorizontalLayoutConfiguration":{ + "type":"structure", + "members":{ + "TileOrder":{ + "shape":"TileOrder", + "documentation":"Sets the automatic ordering of the video tiles.
" + }, + "TilePosition":{ + "shape":"HorizontalTilePosition", + "documentation":"Sets the position of horizontal tiles.
" + }, + "TileCount":{ + "shape":"TileCount", + "documentation":"The maximum number of video tiles to display.
" + }, + "TileAspectRatio":{ + "shape":"TileAspectRatio", + "documentation":"Sets the aspect ratio of the video tiles, such as 16:9.
" + } + }, + "documentation":"Defines the configuration settings for the horizontal layout.
" + }, + "HorizontalTilePosition":{ + "type":"string", + "enum":[ + "Top", + "Bottom" + ] + }, "Iso8601Timestamp":{ "type":"timestamp", "timestampFormat":"iso8601" @@ -1452,6 +1579,12 @@ }, "documentation":"A structure that contains the configuration settings for an AWS Lambda function's data sink.
" }, + "LanguageOptions":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[a-zA-Z-,]+" + }, "LayoutOption":{ "type":"string", "enum":["GridView"] @@ -2540,6 +2673,22 @@ "error":{"httpStatusCode":429}, "exception":true }, + "TileAspectRatio":{ + "type":"string", + "pattern":"^\\d{1,2}\\/\\d{1,2}$" + }, + "TileCount":{ + "type":"integer", + "max":10, + "min":1 + }, + "TileOrder":{ + "type":"string", + "enum":[ + "JoinSequence", + "SpeakerSequence" + ] + }, "Timestamp":{"type":"timestamp"}, "TimestampRange":{ "type":"structure", @@ -2662,6 +2811,35 @@ } } }, + "VerticalLayoutConfiguration":{ + "type":"structure", + "members":{ + "TileOrder":{ + "shape":"TileOrder", + "documentation":"Sets the automatic ordering of the video tiles.
" + }, + "TilePosition":{ + "shape":"VerticalTilePosition", + "documentation":"Sets the position of vertical tiles.
" + }, + "TileCount":{ + "shape":"TileCount", + "documentation":"The maximum number of tiles to display.
" + }, + "TileAspectRatio":{ + "shape":"TileAspectRatio", + "documentation":"Sets the aspect ratio of the video tiles, such as 16:9.
" + } + }, + "documentation":"Defines the configuration settings for a vertial layout.
" + }, + "VerticalTilePosition":{ + "type":"string", + "enum":[ + "Left", + "Right" + ] + }, "VideoArtifactsConfiguration":{ "type":"structure", "required":["State"], @@ -2677,6 +2855,28 @@ }, "documentation":"The video artifact configuration object.
" }, + "VideoAttribute":{ + "type":"structure", + "members":{ + "CornerRadius":{ + "shape":"CornerRadius", + "documentation":"Sets the corner radius of all video tiles.
" + }, + "BorderColor":{ + "shape":"BorderColor", + "documentation":"Defines the border color of all video tiles.
" + }, + "HighlightColor":{ + "shape":"HighlightColor", + "documentation":"Defines the highlight color for the active video tile.
" + }, + "BorderThickness":{ + "shape":"BorderThickness", + "documentation":"Defines the border thickness for all video tiles.
" + } + }, + "documentation":"Defines the settings for a video tile.
" + }, "VideoConcatenationConfiguration":{ "type":"structure", "required":["State"], @@ -2706,12 +2906,24 @@ "min":1, "pattern":"^[0-9a-zA-Z._-]+" }, + "VocabularyFilterNames":{ + "type":"string", + "max":3000, + "min":1, + "pattern":"^[a-zA-Z0-9,-._]+" + }, "VocabularyName":{ "type":"string", "max":200, "min":1, "pattern":"^[0-9a-zA-Z._-]+" }, + "VocabularyNames":{ + "type":"string", + "max":3000, + "min":1, + "pattern":"^[a-zA-Z0-9,-._]+" + }, "VoiceAnalyticsConfigurationStatus":{ "type":"string", "enum":[ From 5700dc8abd270be76ed97c47b11d3857b35398dc Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 24 Jul 2023 18:16:11 +0000 Subject: [PATCH 009/270] Amazon QuickSight Update: This release launches new Snapshot APIs for CSV and PDF exports, adds support for info icon for filters and parameters in Exploration APIs, adds modeled exception to the DeleteAccountCustomization API, and introduces AttributeAggregationFunction's ability to add UNIQUE_VALUE aggregation in tooltips. --- .../feature-AmazonQuickSight-e8a6315.json | 6 + .../codegen-resources/paginators-1.json | 42 ++ .../codegen-resources/service-2.json | 650 ++++++++++++++++++ 3 files changed, 698 insertions(+) create mode 100644 .changes/next-release/feature-AmazonQuickSight-e8a6315.json diff --git a/.changes/next-release/feature-AmazonQuickSight-e8a6315.json b/.changes/next-release/feature-AmazonQuickSight-e8a6315.json new file mode 100644 index 000000000000..926acdb4c129 --- /dev/null +++ b/.changes/next-release/feature-AmazonQuickSight-e8a6315.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "This release launches new Snapshot APIs for CSV and PDF exports, adds support for info icon for filters and parameters in Exploration APIs, adds modeled exception to the DeleteAccountCustomization API, and introduces AttributeAggregationFunction's ability to add UNIQUE_VALUE aggregation in tooltips." 
+} diff --git a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json index 18fac260cadb..e5ce3441c3ce 100644 --- a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json +++ b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json @@ -42,6 +42,30 @@ "limit_key": "MaxResults", "result_key": "DataSources" }, + "ListGroupMemberships": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "GroupMemberList" + }, + "ListGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "GroupList" + }, + "ListIAMPolicyAssignments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "IAMPolicyAssignments" + }, + "ListIAMPolicyAssignmentsForUser": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ActiveAssignments" + }, "ListIngestions": { "input_token": "NextToken", "output_token": "NextToken", @@ -89,6 +113,18 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListUserGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "GroupList" + }, + "ListUsers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "UserList" + }, "ListVPCConnections": { "input_token": "NextToken", "output_token": "NextToken", @@ -117,6 +153,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "DataSourceSummaries" + }, + "SearchGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "GroupList" } } } diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json 
b/services/quicksight/src/main/resources/codegen-resources/service-2.json index db2b5d68fa25..96c15e47c794 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -467,6 +467,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"PreconditionNotMetException"}, {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], @@ -1074,6 +1076,42 @@ ], "documentation":"Describes read and write permissions for a dashboard.
" }, + "DescribeDashboardSnapshotJob":{ + "name":"DescribeDashboardSnapshotJob", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/snapshot-jobs/{SnapshotJobId}" + }, + "input":{"shape":"DescribeDashboardSnapshotJobRequest"}, + "output":{"shape":"DescribeDashboardSnapshotJobResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"Describes an existing snapshot job.
Poll job descriptions after a job starts to know the status of the job. For information on available status codes, see JobStatus.
Describes the result of an existing snapshot job that has finished running.
A finished snapshot job will return a COMPLETED or FAILED status when you poll the job with a DescribeDashboardSnapshotJob API call.
If the job has not finished running, this operation returns a message that says Dashboard Snapshot Job with id <SnapshotjobId> has not reached a terminal state..
Starts an Asset Bundle import job.
An Asset Bundle import job imports specified Amazon QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon QuickSight account. Each Amazon QuickSight account can run up to 5 import jobs concurrently.
The API caller must have the necessary \"create\", \"describe\", and \"update\" permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported.
Starts an asynchronous job that generates a dashboard snapshot. You can request up to one paginated PDF and up to five CSVs per API call.
Poll job descriptions with a DescribeDashboardSnapshotJob API call. Once the job succeeds, use the DescribeDashboardSnapshotJobResult API to obtain the download URIs that the job generates.
Aggregation for date values.
COUNT: Aggregate by the total number of values, including duplicates.
DISTINCT_COUNT: Aggregate by the total number of distinct values.
MIN: Select the smallest date value.
MAX: Select the largest date value.
Aggregation for attributes.
" } }, "documentation":"An aggregation function aggregates values from a dimension or measure.
This is a union type structure. For this structure to be valid, only one of the attributes can be defined.
" @@ -3474,6 +3537,20 @@ }, "documentation":"The settings that you want to use with the Q search bar.
" }, + "AnonymousUserSnapshotJobResult":{ + "type":"structure", + "members":{ + "FileGroups":{ + "shape":"SnapshotJobResultFileGroupList", + "documentation":"A list of SnapshotJobResultFileGroup objects that contain information on the files that are requested during a StartDashboardSnapshotJob API call. If the job succeeds, these objects contain the location where the snapshot artifacts are stored. If the job fails, the objects contain information about the error that caused the job to fail.
A structure that contains the file groups that are requested for the artifact generation in a StartDashboardSnapshotJob API call.
Parameters for Amazon Athena.
" }, + "AttributeAggregationFunction":{ + "type":"structure", + "members":{ + "SimpleAttributeAggregation":{ + "shape":"SimpleAttributeAggregationFunction", + "documentation":"The built-in aggregation functions for attributes.
UNIQUE_VALUE: Returns the unique value for a field, aggregated by the dimension fields.
Used by the UNIQUE_VALUE aggregation function. If there are multiple values for the field used by the aggregation, the value for this property will be returned instead. Defaults to '*'.
Aggregation for attributes.
" + }, "AuroraParameters":{ "type":"structure", "required":[ @@ -9719,6 +9810,10 @@ "DateTimeFormat":{ "shape":"DateTimeFormat", "documentation":"Customize how dates are formatted in controls.
" + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"The configuration of info icon label options.
" } }, "documentation":"The display options of a control.
" @@ -11641,6 +11736,149 @@ } } }, + "DescribeDashboardSnapshotJobRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "SnapshotJobId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"The ID of the Amazon Web Services account that the dashboard snapshot job is executed in.
", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"The ID of the dashboard that you have started a snapshot job for.
", + "location":"uri", + "locationName":"DashboardId" + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"The ID of the job to be described. The job ID is set when you start a new job with a StartDashboardSnapshotJob API call.
The ID of the Amazon Web Services account that the dashboard snapshot job is executed in.
" + }, + "DashboardId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"The ID of the dashboard that you have started a snapshot job for.
" + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"The ID of the job to be described. The job ID is set when you start a new job with a StartDashboardSnapshotJob API call.
The user configuration for the snapshot job. This information is provided when you make a StartDashboardSnapshotJob API call.
The snapshot configuration of the job. This information is provided when you make a StartDashboardSnapshotJob API call.
The Amazon Resource Name (ARN) for the snapshot job. The job ARN is generated when you start a new job with a StartDashboardSnapshotJob API call.
Indicates the status of a job. The status updates as the job executes. This shows one of the following values.
COMPLETED - The job was completed successfully.
FAILED - The job failed to execute.
QUEUED - The job is queued and hasn't started yet.
RUNNING - The job is still running.
The time that the snapshot job was created.
" + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"The time that the snapshot job status was last updated.
" + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"The Amazon Web Services request ID for this operation.
" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"The HTTP status of the request
" + } + } + }, + "DescribeDashboardSnapshotJobResultRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "SnapshotJobId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"The ID of the Amazon Web Services account that the dashboard snapshot job is executed in.
", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"The ID of the dashboard that you have started a snapshot job for.
", + "location":"uri", + "locationName":"DashboardId" + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"The ID of the job to be described. The job ID is set when you start a new job with a StartDashboardSnapshotJob API call.
The Amazon Resource Name (ARN) for the snapshot job. The job ARN is generated when you start a new job with a StartDashboardSnapshotJob API call.
Indicates the status of a job after it has reached a terminal state. A finished snapshot job will retuen a COMPLETED or FAILED status.
The time that a snapshot job was created.
" + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"The time that a snapshot job status was last updated.
" + }, + "Result":{ + "shape":"SnapshotJobResult", + "documentation":"The result of the snapshot job. Jobs that have successfully completed will return the S3Uri where they are located. Jobs that have failedwill return information on the error that caused the job to fail.
" + }, + "ErrorInfo":{ + "shape":"SnapshotJobErrorInfo", + "documentation":"Displays information for the error that caused a job to fail.
" + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"The Amazon Web Services request ID for this operation.
" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"The HTTP status of the request
", + "location":"statusCode" + } + } + }, "DescribeDataSetPermissionsRequest":{ "type":"structure", "required":[ @@ -13133,6 +13371,10 @@ "TitleOptions":{ "shape":"LabelOptions", "documentation":"The options to configure the title visibility, name, and font size.
" + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"The configuration of info icon label options.
" } }, "documentation":"The display options of a control.
" @@ -17438,6 +17680,10 @@ "TitleOptions":{ "shape":"LabelOptions", "documentation":"The options to configure the title visibility, name, and font size.
" + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"The configuration of info icon label options.
" } }, "documentation":"The display options of a control.
" @@ -22089,6 +22335,10 @@ "DateTimeFormat":{ "shape":"DateTimeFormat", "documentation":"Customize how dates are formatted in controls.
" + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"The configuration of info icon label options.
" } }, "documentation":"The display options of a control.
" @@ -22528,6 +22778,29 @@ "max":1024, "min":1 }, + "S3BucketConfiguration":{ + "type":"structure", + "required":[ + "BucketName", + "BucketPrefix", + "BucketRegion" + ], + "members":{ + "BucketName":{ + "shape":"NonEmptyString", + "documentation":"The name of an existing Amazon S3 bucket where the generated snapshot artifacts are sent.
" + }, + "BucketPrefix":{ + "shape":"NonEmptyString", + "documentation":"The prefix of the Amazon S3 bucket that the generated snapshots are stored in.
" + }, + "BucketRegion":{ + "shape":"NonEmptyString", + "documentation":"The region that the Amazon S3 bucket is located in. The bucket must be located in the same region that the StartDashboardSnapshotJob API call is made.
An optional structure that contains the Amazon S3 bucket configuration that the generated snapshots are stored in. If you don't provide this information, generated snapshots are stored in the default Amazon QuickSight bucket.
" + }, "S3Key":{ "type":"string", "max":1024, @@ -23405,6 +23678,11 @@ "type":"long", "sensitive":true }, + "SensitiveS3Uri":{ + "type":"string", + "pattern":"^(https|s3)://([^/]+)/?(.*)$", + "sensitive":true + }, "SensitiveString":{ "type":"string", "sensitive":true @@ -23496,6 +23774,12 @@ "max":128, "min":1 }, + "SessionTagKeyList":{ + "type":"list", + "member":{"shape":"SessionTagKey"}, + "max":50, + "min":1 + }, "SessionTagList":{ "type":"list", "member":{"shape":"SessionTag"}, @@ -23568,6 +23852,25 @@ "DATE_RANGE" ] }, + "SheetControlInfoIconLabelOptions":{ + "type":"structure", + "members":{ + "Visibility":{ + "shape":"Visibility", + "documentation":"The visibility configuration of info icon label options.
" + }, + "InfoIconText":{ + "shape":"SheetControlInfoIconText", + "documentation":"The text content of info icon.
" + } + }, + "documentation":"A control to display info icons for filters and parameters.
" + }, + "SheetControlInfoIconText":{ + "type":"string", + "max":100, + "min":1 + }, "SheetControlLayout":{ "type":"structure", "required":["Configuration"], @@ -23861,6 +24164,10 @@ }, "documentation":"A SignupResponse object that contains a summary of a newly created account.
The options to configure the title visibility, name, and font size.
" + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"The configuration of info icon label options.
" } }, "documentation":"The display options of a control.
" @@ -23961,6 +24272,279 @@ }, "documentation":"Options that determine the layout and display options of a chart's small multiples.
" }, + "SnapshotAnonymousUser":{ + "type":"structure", + "members":{ + "RowLevelPermissionTags":{ + "shape":"SessionTagList", + "documentation":"The tags to be used for row-level security (RLS). Make sure that the relevant datasets have RLS tags configured before you start a snapshot export job. You can configure the RLS tags of a dataset with a DataSet$RowLevelPermissionTagConfiguration API call.
These are not the tags that are used for Amazon Web Services resource tagging. For more information on row level security in Amazon QuickSight, see Using Row-Level Security (RLS) with Tagsin the Amazon QuickSight User Guide.
" + } + }, + "documentation":"A structure that contains information on the anonymous user configuration.
" + }, + "SnapshotAnonymousUserList":{ + "type":"list", + "member":{"shape":"SnapshotAnonymousUser"}, + "max":1, + "min":1 + }, + "SnapshotAnonymousUserRedacted":{ + "type":"structure", + "members":{ + "RowLevelPermissionTagKeys":{ + "shape":"SessionTagKeyList", + "documentation":"The tag keys for the RowLevelPermissionTags.
Use this structure to redact sensitive information that you provide about an anonymous user from the snapshot.
" + }, + "SnapshotAnonymousUserRedactedList":{ + "type":"list", + "member":{"shape":"SnapshotAnonymousUserRedacted"}, + "max":1, + "min":1 + }, + "SnapshotConfiguration":{ + "type":"structure", + "required":["FileGroups"], + "members":{ + "FileGroups":{ + "shape":"SnapshotFileGroupList", + "documentation":"A list of SnapshotJobResultFileGroup objects that contain information about the snapshot that is generated. This list can hold a maximum of 6 FileGroup configurations.
A structure that contains information on the Amazon S3 bucket that the generated snapshot is stored in.
" + }, + "Parameters":{"shape":"Parameters"} + }, + "documentation":"Describes the configuration of the dashboard snapshot.
" + }, + "SnapshotDestinationConfiguration":{ + "type":"structure", + "members":{ + "S3Destinations":{ + "shape":"SnapshotS3DestinationConfigurationList", + "documentation":" A list of SnapshotS3DestinationConfiguration objects that contain Amazon S3 destination configurations. This structure can hold a maximum of 1 S3DestinationConfiguration.
A structure that contains information on the Amazon S3 destinations of the generated snapshot.
" + }, + "SnapshotFile":{ + "type":"structure", + "required":[ + "SheetSelections", + "FormatType" + ], + "members":{ + "SheetSelections":{ + "shape":"SnapshotFileSheetSelectionList", + "documentation":"A list of SnapshotFileSheetSelection objects that contain information on the dashboard sheet that is exported. These objects provide information about the snapshot artifacts that are generated during the job. This structure can hold a maximum of 5 CSV configurations or 1 configuration for PDF.
The format of the snapshot file to be generated. You can choose between CSV and PDF.
A structure that contains the information for the snapshot that you want to generate. This information is provided by you when you start a new snapshot job.
" + }, + "SnapshotFileFormatType":{ + "type":"string", + "enum":[ + "CSV", + "PDF" + ] + }, + "SnapshotFileGroup":{ + "type":"structure", + "members":{ + "Files":{ + "shape":"SnapshotFileList", + "documentation":"A list of SnapshotFile objects that contain the information on the snapshot files that need to be generated. This structure can hold 1 configuration at a time.
A structure that contains the information on the snapshot files.
" + }, + "SnapshotFileGroupList":{ + "type":"list", + "member":{"shape":"SnapshotFileGroup"}, + "max":6, + "min":1 + }, + "SnapshotFileList":{ + "type":"list", + "member":{"shape":"SnapshotFile"}, + "max":1, + "min":1 + }, + "SnapshotFileSheetSelection":{ + "type":"structure", + "required":[ + "SheetId", + "SelectionScope" + ], + "members":{ + "SheetId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"The sheet ID of the dashboard to generate the snapshot artifact from. This value is required for CSV or PDF format types.
" + }, + "SelectionScope":{ + "shape":"SnapshotFileSheetSelectionScope", + "documentation":"The selection scope of the visuals on a sheet of a dashboard that you are generating a snapthot of. You can choose one of the following options.
ALL_VISUALS - Selects all visuals that are on the sheet. This value is required if the snapshot is a PDF.
SELECTED_VISUALS - Select the visual that you want to add to the snapshot. This value is required if the snapshot is a CSV.
A structure that lists the IDs of the visuals in the selected sheet. Supported visual types are table, pivot table visuals. This value is required if you are generating a CSV. This value supports a maximum of 1 visual ID.
" + } + }, + "documentation":"A structure that contains information that identifies the snapshot that needs to be generated.
" + }, + "SnapshotFileSheetSelectionList":{ + "type":"list", + "member":{"shape":"SnapshotFileSheetSelection"}, + "max":1, + "min":1 + }, + "SnapshotFileSheetSelectionScope":{ + "type":"string", + "enum":[ + "ALL_VISUALS", + "SELECTED_VISUALS" + ] + }, + "SnapshotFileSheetSelectionVisualIdList":{ + "type":"list", + "member":{"shape":"ShortRestrictiveResourceId"}, + "max":1, + "min":1 + }, + "SnapshotJobErrorInfo":{ + "type":"structure", + "members":{ + "ErrorMessage":{ + "shape":"String", + "documentation":"The error message.
" + }, + "ErrorType":{ + "shape":"String", + "documentation":"The error type.
" + } + }, + "documentation":"An object that contains information on the error that caused the snapshot job to fail.
" + }, + "SnapshotJobResult":{ + "type":"structure", + "members":{ + "AnonymousUsers":{ + "shape":"AnonymousUserSnapshotJobResultList", + "documentation":" A list of AnonymousUserSnapshotJobResult objects that contain information on anonymous users and their user configurations. This data provided by you when you make a StartDashboardSnapshotJob API call.
An object that provides information on the result of a snapshot job. This object provides information about the job, the job status, and the location of the generated file.
" + }, + "SnapshotJobResultErrorInfo":{ + "type":"structure", + "members":{ + "ErrorMessage":{ + "shape":"String", + "documentation":"The error message.
" + }, + "ErrorType":{ + "shape":"String", + "documentation":"The error type.
" + } + }, + "documentation":"Information on the error that caused the snapshot job to fail.
" + }, + "SnapshotJobResultErrorInfoList":{ + "type":"list", + "member":{"shape":"SnapshotJobResultErrorInfo"} + }, + "SnapshotJobResultFileGroup":{ + "type":"structure", + "members":{ + "Files":{ + "shape":"SnapshotFileList", + "documentation":" A list of SnapshotFile objects.
A list of SnapshotJobS3Result objects.
A structure that contains information on the generated snapshot file groups.
" + }, + "SnapshotJobResultFileGroupList":{ + "type":"list", + "member":{"shape":"SnapshotJobResultFileGroup"} + }, + "SnapshotJobS3Result":{ + "type":"structure", + "members":{ + "S3DestinationConfiguration":{ + "shape":"SnapshotS3DestinationConfiguration", + "documentation":"A list of Amazon S3 bucket configurations that are provided when you make a StartDashboardSnapshotJob API call.
The Amazon S3 Uri.
" + }, + "ErrorInfo":{ + "shape":"SnapshotJobResultErrorInfoList", + "documentation":"An array of error records that describe any failures that occur while the dashboard snapshot job runs.
" + } + }, + "documentation":"The Amazon S3 result from the snapshot job. The result includes the DestinationConfiguration and the Amazon S3 Uri. If an error occured during the job, the result returns information on the error.
A structure that contains details about the Amazon S3 bucket that the generated dashboard snapshot is saved in.
" + } + }, + "documentation":"A structure that describes the Amazon S3 settings to use to save the generated dashboard snapshot.
" + }, + "SnapshotS3DestinationConfigurationList":{ + "type":"list", + "member":{"shape":"SnapshotS3DestinationConfiguration"}, + "max":1, + "min":1 + }, + "SnapshotUserConfiguration":{ + "type":"structure", + "members":{ + "AnonymousUsers":{ + "shape":"SnapshotAnonymousUserList", + "documentation":"An array of records that describe the anonymous users that the dashboard snapshot is generated for.
" + } + }, + "documentation":"A structure that contains information about the users that the dashboard snapshot is generated for.
" + }, + "SnapshotUserConfigurationRedacted":{ + "type":"structure", + "members":{ + "AnonymousUsers":{ + "shape":"SnapshotAnonymousUserRedactedList", + "documentation":"An array of records that describe anonymous users that the dashboard snapshot is generated for. Sensitive user information is excluded.
" + } + }, + "documentation":"A structure that contains information about the users that the dashboard snapshot is generated for. Sensitive user information is excluded.
" + }, "SnowflakeParameters":{ "type":"structure", "required":[ @@ -24195,6 +24779,64 @@ } } }, + "StartDashboardSnapshotJobRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "SnapshotJobId", + "UserConfiguration", + "SnapshotConfiguration" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"The ID of the Amazon Web Services account that the dashboard snapshot job is executed in.
", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"The ID of the dashboard that you want to start a snapshot job for.
", + "location":"uri", + "locationName":"DashboardId" + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"An ID for the dashboard snapshot job. This ID is unique to the dashboard while the job is running. This ID can be used to poll the status of a job with a DescribeDashboardSnapshotJob while the job runs. You can reuse this ID for another job 24 hours after the current job is completed.
A structure that contains information about the anonymous users that the generated snapshot is for. This API will not return information about registered Amazon QuickSight.
" + }, + "SnapshotConfiguration":{ + "shape":"SnapshotConfiguration", + "documentation":"A structure that describes the configuration of the dashboard snapshot.
" + } + } + }, + "StartDashboardSnapshotJobResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"The Amazon Resource Name (ARN) for the dashboard snapshot job.
" + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"The ID of the job. The job ID is set when you start a new job with a StartDashboardSnapshotJob API call.
The Amazon Web Services request ID for this operation.
" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"The HTTP status of the request
", + "location":"statusCode" + } + } + }, "StatePersistenceConfigurations":{ "type":"structure", "required":["Enabled"], @@ -25332,6 +25974,10 @@ "PlaceholderOptions":{ "shape":"TextControlPlaceholderOptions", "documentation":"The configuration of the placeholder options in a text area control.
" + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"The configuration of info icon label options.
" } }, "documentation":"The display options of a control.
" @@ -25374,6 +26020,10 @@ "PlaceholderOptions":{ "shape":"TextControlPlaceholderOptions", "documentation":"The configuration of the placeholder options in a text field control.
" + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"The configuration of info icon label options.
" } }, "documentation":"The display options of a control.
" From 8052c592ba39722ad187f24c27a318c1fa58f2f9 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 24 Jul 2023 18:16:19 +0000 Subject: [PATCH 010/270] AWS Cost Explorer Service Update: This release introduces the new API 'GetSavingsPlanPurchaseRecommendationDetails', which retrieves the details for a Savings Plan recommendation. It also updates the existing API 'GetSavingsPlansPurchaseRecommendation' to include the recommendation detail ID. --- ...eature-AWSCostExplorerService-21849f3.json | 6 + .../codegen-resources/endpoint-rule-set.json | 914 +++++------------- .../codegen-resources/endpoint-tests.json | 315 +++++- .../codegen-resources/service-2.json | 214 +++- 4 files changed, 768 insertions(+), 681 deletions(-) create mode 100644 .changes/next-release/feature-AWSCostExplorerService-21849f3.json diff --git a/.changes/next-release/feature-AWSCostExplorerService-21849f3.json b/.changes/next-release/feature-AWSCostExplorerService-21849f3.json new file mode 100644 index 000000000000..ce2d2f445cc1 --- /dev/null +++ b/.changes/next-release/feature-AWSCostExplorerService-21849f3.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "This release introduces the new API 'GetSavingsPlanPurchaseRecommendationDetails', which retrieves the details for a Savings Plan recommendation. It also updates the existing API 'GetSavingsPlansPurchaseRecommendation' to include the recommendation detail ID." 
+} diff --git a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json index 09cea2bc7d5a..e4c99e99633a 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,64 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": 
[ { "conditions": [ { - "fn": "stringEquals", + "fn": "isSet", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws" + "ref": "Region" + } ] } ], @@ -128,22 +111,13 @@ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -151,556 +125,303 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://ce.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] + }, + "aws-cn" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://ce.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "cn-northwest-1" } ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", "rules": [ { - "conditions": [], - 
"endpoint": { - "url": "https://cost-explorer-fips.{Region}.api.aws", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "ce" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] } ] }, - "headers": {} - }, - "type": "endpoint" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cost-explorer-fips.{Region}.amazonaws.com", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "ce" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] } ] - }, - "headers": {} - }, - "type": "endpoint" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ 
+ { + "conditions": [], + "endpoint": { + "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cost-explorer.{Region}.api.aws", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "ce" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] } ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://ce.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { 
- "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cost-explorer-fips.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" + "conditions": [], + "endpoint": { + "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cost-explorer-fips.{Region}.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": 
"UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [], - "endpoint": { - "url": "https://cost-explorer.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://ce.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or 
both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [], "type": "tree", @@ -708,7 +429,7 @@ { "conditions": [], "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -717,134 +438,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://ce.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - 
], - "endpoint": { - "url": "https://ce.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json b/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json index b733909b76df..26d783f3157f 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,5 +1,88 @@ { "testCases": [ + { + "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://ce.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce.us-east-1.api.aws" 
+ } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://ce.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", "expect": { @@ -7,9 +90,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "cn-northwest-1", "name": "sigv4", - "signingName": "ce" + "signingName": "ce", + "signingRegion": "cn-northwest-1" } ] }, @@ -17,35 +100,236 @@ } }, "params": { + "Region": "aws-cn-global", "UseFIPS": false, - "UseDualStack": false, - "Region": "aws-cn-global" + "UseDualStack": false } }, { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { 
"properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "name": "sigv4", - "signingName": "ce" + "signingName": "ce", + "signingRegion": "cn-northwest-1" } ] }, - "url": "https://ce.us-east-1.amazonaws.com" + "url": "https://ce.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + 
"UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", "UseFIPS": false, 
"UseDualStack": false, - "Region": "aws-global" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -54,7 +338,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -64,9 +347,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -76,11 +359,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/costexplorer/src/main/resources/codegen-resources/service-2.json b/services/costexplorer/src/main/resources/codegen-resources/service-2.json index 42715b57eeb4..37fbde371ede 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/service-2.json +++ b/services/costexplorer/src/main/resources/codegen-resources/service-2.json @@ -296,6 +296,20 @@ ], "documentation":"Creates recommendations that help you save cost by identifying idle and underutilized Amazon EC2 instances.
Recommendations are generated to either downsize or terminate instances, along with providing savings detail and metrics. For more information about calculation and function, see Optimizing Your Cost with Rightsizing Recommendations in the Billing and Cost Management User Guide.
" }, + "GetSavingsPlanPurchaseRecommendationDetails":{ + "name":"GetSavingsPlanPurchaseRecommendationDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSavingsPlanPurchaseRecommendationDetailsRequest"}, + "output":{"shape":"GetSavingsPlanPurchaseRecommendationDetailsResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"DataUnavailableException"} + ], + "documentation":"Retrieves the details for a Savings Plan recommendation. These details include the hourly data-points that construct the new cost, coverage, and utilization charts.
" + }, "GetSavingsPlansCoverage":{ "name":"GetSavingsPlansCoverage", "http":{ @@ -423,7 +437,8 @@ "output":{"shape":"ListSavingsPlansPurchaseRecommendationGenerationResponse"}, "errors":[ {"shape":"LimitExceededException"}, - {"shape":"InvalidNextTokenException"} + {"shape":"InvalidNextTokenException"}, + {"shape":"DataUnavailableException"} ], "documentation":"Retrieves a list of your historical recommendation generations within the past 30 days.
" }, @@ -465,7 +480,8 @@ "errors":[ {"shape":"LimitExceededException"}, {"shape":"ServiceQuotaExceededException"}, - {"shape":"GenerationExistsException"} + {"shape":"GenerationExistsException"}, + {"shape":"DataUnavailableException"} ], "documentation":"Requests a Savings Plans recommendation generation. This enables you to calculate a fresh set of Savings Plans recommendations that takes your latest usage data and current Savings Plans inventory into account. You can refresh Savings Plans recommendations up to three times daily for a consolidated billing family.
StartSavingsPlansPurchaseRecommendationGeneration has no request syntax because no input parameters are needed to support this operation.
Updates an existing cost anomaly monitor subscription.
" + "documentation":"Updates an existing cost anomaly subscription. Specify the fields that you want to update. Omitted fields are unchanged.
The JSON below describes the generic construct for each type. See Request Parameters for possible values as they apply to AnomalySubscription.
(deprecated)
The dollar value that triggers a notification if the threshold is exceeded.
This field has been deprecated. To specify a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.
One of Threshold or ThresholdExpression is required for this resource.
", + "documentation":"(deprecated)
An absolute dollar value that must be exceeded by the anomaly's total impact (see Impact for more details) for an anomaly notification to be generated.
This field has been deprecated. To specify a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.
One of Threshold or ThresholdExpression is required for this resource. You cannot specify both.
", "deprecated":true, "deprecatedMessage":"Threshold has been deprecated in favor of ThresholdExpression" }, "Frequency":{ "shape":"AnomalySubscriptionFrequency", - "documentation":"The frequency that anomaly reports are sent over email.
" + "documentation":"The frequency that anomaly notifications are sent. Notifications are sent either over email (for DAILY and WEEKLY frequencies) or SNS (for IMMEDIATE frequency). For more information, see Creating an Amazon SNS topic for anomaly notifications.
" }, "SubscriptionName":{ "shape":"GenericString", @@ -747,10 +763,10 @@ }, "ThresholdExpression":{ "shape":"Expression", - "documentation":"An Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000.
One of Threshold or ThresholdExpression is required for this resource.
The following are examples of valid ThresholdExpressions:
Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }
Percentage threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }
AND two thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }
OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }
An Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE, corresponding to an anomaly’s TotalImpact and TotalImpactPercentage, respectively (see Impact for more details). The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000 in string format.
One of Threshold or ThresholdExpression is required for this resource. You cannot specify both.
The following are examples of valid ThresholdExpressions:
Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }
Percentage threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }
AND two thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }
OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }
The association between a monitor, threshold, and list of subscribers used to deliver notifications about anomalies detected by a monitor that exceeds a threshold. The content consists of the detailed metadata and the current status of the AnomalySubscription object.
An AnomalySubscription resource (also referred to as an alert subscription) sends notifications about specific anomalies that meet an alerting criteria defined by you.
You can specify the frequency of the alerts and the subscribers to notify.
Anomaly subscriptions can be associated with one or more AnomalyMonitor resources, and they only send notifications about anomalies detected by those associated monitors. You can also configure a threshold to further control which anomalies are included in the notifications.
Anomalies that don’t exceed the chosen threshold and therefore don’t trigger notifications from an anomaly subscription will still be available on the console and from the GetAnomalies API.
An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, and LINKED_ACCOUNT_NAME.
Root level OR isn't supported. We recommend that you create a separate rule instead.
RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.
An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, LINKED_ACCOUNT_NAME, REGION, and USAGE_TYPE.
RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.
The filter that's based on CostCategory values.
Use Expression to filter in various Cost Explorer APIs.
Not all Expression types are supported in each API. Refer to the documentation for each specific API to see what is supported.
There are two patterns:
Simple dimension values.
There are three types of simple dimension values: CostCategories, Tags, and Dimensions.
Specify the CostCategories field to define a filter that acts on Cost Categories.
Specify the Tags field to define a filter that acts on Cost Allocation Tags.
Specify the Dimensions field to define a filter that acts on the DimensionValues .
For each filter type, you can set the dimension name and values for the filters that you plan to use.
For example, you can filter for REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name (for example, REGION==US East (N. Virginia).
The corresponding Expression for this example is as follows: { \"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", “us-west-1” ] } }
As shown in the previous example, lists of dimension values are combined with OR when applying the filter.
You can also set different match options to further control how the filter behaves. Not all APIs support match options. Refer to the documentation for each specific API to see what is supported.
For example, you can filter for linked account names that start with “a”.
The corresponding Expression for this example is as follows: { \"Dimensions\": { \"Key\": \"LINKED_ACCOUNT_NAME\", \"MatchOptions\": [ \"STARTS_WITH\" ], \"Values\": [ \"a\" ] } }
Compound Expression types with logical operations.
You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. By doing this, you can filter by more advanced options.
For example, you can filter by ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer).
The corresponding Expression for this example is as follows: { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", \"us-west-1\" ] }}, {\"Tags\": { \"Key\": \"TagName\", \"Values\": [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [\"DataTransfer\"] }}} ] }
Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error: { \"And\": [ ... ], \"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [ \"DataTransfer\" ] } }
The following is an example of the corresponding error message: \"Expression has more than one roots. Only one root operator is allowed for each expression: And, Or, Not, Dimensions, Tags, CostCategories\"
For the GetRightsizingRecommendation action, a combination of OR and NOT isn't supported. OR isn't supported between different dimensions, or dimensions and tags. NOT operators aren't supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE.
For the GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR aren't supported. Dimensions are limited to LINKED_ACCOUNT.
Use Expression to filter in various Cost Explorer APIs.
Not all Expression types are supported in each API. Refer to the documentation for each specific API to see what is supported.
There are two patterns:
Simple dimension values.
There are three types of simple dimension values: CostCategories, Tags, and Dimensions.
Specify the CostCategories field to define a filter that acts on Cost Categories.
Specify the Tags field to define a filter that acts on Cost Allocation Tags.
Specify the Dimensions field to define a filter that acts on the DimensionValues .
For each filter type, you can set the dimension name and values for the filters that you plan to use.
For example, you can filter for REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name (for example, REGION==US East (N. Virginia).
The corresponding Expression for this example is as follows: { \"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", \"us-west-1\" ] } }
As shown in the previous example, lists of dimension values are combined with OR when applying the filter.
You can also set different match options to further control how the filter behaves. Not all APIs support match options. Refer to the documentation for each specific API to see what is supported.
For example, you can filter for linked account names that start with \"a\".
The corresponding Expression for this example is as follows: { \"Dimensions\": { \"Key\": \"LINKED_ACCOUNT_NAME\", \"MatchOptions\": [ \"STARTS_WITH\" ], \"Values\": [ \"a\" ] } }
Compound Expression types with logical operations.
You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. By doing this, you can filter by more advanced options.
For example, you can filter by ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer).
The corresponding Expression for this example is as follows: { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", \"us-west-1\" ] }}, {\"Tags\": { \"Key\": \"TagName\", \"Values\": [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [\"DataTransfer\"] }}} ] }
Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error: { \"And\": [ ... ], \"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [ \"DataTransfer\" ] } }
The following is an example of the corresponding error message: \"Expression has more than one roots. Only one root operator is allowed for each expression: And, Or, Not, Dimensions, Tags, CostCategories\"
For the GetRightsizingRecommendation action, a combination of OR and NOT isn't supported. OR isn't supported between different dimensions, or dimensions and tags. NOT operators aren't supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE.
For the GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR aren't supported. Dimensions are limited to LINKED_ACCOUNT.
The ID that is associated with the Savings Plan recommendation.
" + } + } + }, + "GetSavingsPlanPurchaseRecommendationDetailsResponse":{ + "type":"structure", + "members":{ + "RecommendationDetailId":{ + "shape":"RecommendationDetailId", + "documentation":"The ID that is associated with the Savings Plan recommendation.
" + }, + "RecommendationDetailData":{ + "shape":"RecommendationDetailData", + "documentation":"Contains detailed information about a specific Savings Plan recommendation.
" + } + } + }, "GetSavingsPlansCoverageRequest":{ "type":"structure", "required":["TimePeriod"], @@ -3158,6 +3197,10 @@ "key":{"shape":"MetricName"}, "value":{"shape":"MetricValue"} }, + "MetricsOverLookbackPeriod":{ + "type":"list", + "member":{"shape":"RecommendationDetailHourlyMetrics"} + }, "ModifyRecommendationDetail":{ "type":"structure", "members":{ @@ -3346,6 +3389,151 @@ }, "RICostForUnusedHours":{"type":"string"}, "RealizedSavings":{"type":"string"}, + "RecommendationDetailData":{ + "type":"structure", + "members":{ + "AccountScope":{ + "shape":"AccountScope", + "documentation":"The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the management account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.
" + }, + "LookbackPeriodInDays":{ + "shape":"LookbackPeriodInDays", + "documentation":"How many days of previous usage that Amazon Web Services considers when making this recommendation.
" + }, + "SavingsPlansType":{ + "shape":"SupportedSavingsPlansType", + "documentation":"The requested Savings Plan recommendation type.
" + }, + "TermInYears":{ + "shape":"TermInYears", + "documentation":"The term of the commitment in years.
" + }, + "PaymentOption":{ + "shape":"PaymentOption", + "documentation":"The payment option for the commitment (for example, All Upfront or No Upfront).
" + }, + "AccountId":{ + "shape":"GenericString", + "documentation":"The AccountID that the recommendation is generated for.
" + }, + "CurrencyCode":{ + "shape":"GenericString", + "documentation":"The currency code that Amazon Web Services used to generate the recommendation and present potential savings.
" + }, + "InstanceFamily":{ + "shape":"GenericString", + "documentation":"The instance family of the recommended Savings Plan.
" + }, + "Region":{ + "shape":"GenericString", + "documentation":"The region the recommendation is generated for.
" + }, + "OfferingId":{ + "shape":"GenericString", + "documentation":"The unique ID that's used to distinguish Savings Plans from one another.
" + }, + "GenerationTimestamp":{"shape":"ZonedDateTime"}, + "LatestUsageTimestamp":{"shape":"ZonedDateTime"}, + "CurrentAverageHourlyOnDemandSpend":{ + "shape":"GenericString", + "documentation":"The average value of hourly On-Demand spend over the lookback period of the applicable usage type.
" + }, + "CurrentMaximumHourlyOnDemandSpend":{ + "shape":"GenericString", + "documentation":"The highest value of hourly On-Demand spend over the lookback period of the applicable usage type.
" + }, + "CurrentMinimumHourlyOnDemandSpend":{ + "shape":"GenericString", + "documentation":"The lowest value of hourly On-Demand spend over the lookback period of the applicable usage type.
" + }, + "EstimatedAverageUtilization":{ + "shape":"GenericString", + "documentation":"The estimated utilization of the recommended Savings Plan.
" + }, + "EstimatedMonthlySavingsAmount":{ + "shape":"GenericString", + "documentation":"The estimated monthly savings amount based on the recommended Savings Plan.
" + }, + "EstimatedOnDemandCost":{ + "shape":"GenericString", + "documentation":"The remaining On-Demand cost estimated to not be covered by the recommended Savings Plan, over the length of the lookback period.
" + }, + "EstimatedOnDemandCostWithCurrentCommitment":{ + "shape":"GenericString", + "documentation":"The estimated On-Demand costs you expect with no additional commitment, based on your usage of the selected time period and the Savings Plan you own.
" + }, + "EstimatedROI":{ + "shape":"GenericString", + "documentation":"The estimated return on investment that's based on the recommended Savings Plan that you purchased. This is calculated as estimatedSavingsAmount/estimatedSPCost*100.
" + }, + "EstimatedSPCost":{ + "shape":"GenericString", + "documentation":"The cost of the recommended Savings Plan over the length of the lookback period.
" + }, + "EstimatedSavingsAmount":{ + "shape":"GenericString", + "documentation":"The estimated savings amount that's based on the recommended Savings Plan over the length of the lookback period.
" + }, + "EstimatedSavingsPercentage":{ + "shape":"GenericString", + "documentation":"The estimated savings percentage relative to the total cost of applicable On-Demand usage over the lookback period.
" + }, + "ExistingHourlyCommitment":{ + "shape":"GenericString", + "documentation":"The existing hourly commitment for the Savings Plan type.
" + }, + "HourlyCommitmentToPurchase":{ + "shape":"GenericString", + "documentation":"The recommended hourly commitment level for the Savings Plan type and the configuration that's based on the usage during the lookback period.
" + }, + "UpfrontCost":{ + "shape":"GenericString", + "documentation":"The upfront cost of the recommended Savings Plan, based on the selected payment option.
" + }, + "CurrentAverageCoverage":{ + "shape":"GenericString", + "documentation":"The average value of hourly coverage over the lookback period.
" + }, + "EstimatedAverageCoverage":{ + "shape":"GenericString", + "documentation":"The estimated coverage of the recommended Savings Plan.
" + }, + "MetricsOverLookbackPeriod":{ + "shape":"MetricsOverLookbackPeriod", + "documentation":"The related hourly cost, coverage, and utilization metrics over the lookback period.
" + } + }, + "documentation":"The details and metrics for the given recommendation.
" + }, + "RecommendationDetailHourlyMetrics":{ + "type":"structure", + "members":{ + "StartTime":{"shape":"ZonedDateTime"}, + "EstimatedOnDemandCost":{ + "shape":"GenericString", + "documentation":"The remaining On-Demand cost estimated to not be covered by the recommended Savings Plan, over the length of the lookback period.
" + }, + "CurrentCoverage":{ + "shape":"GenericString", + "documentation":"The current amount of Savings Plans eligible usage that the Savings Plan covered.
" + }, + "EstimatedCoverage":{ + "shape":"GenericString", + "documentation":"The estimated coverage amount based on the recommended Savings Plan.
" + }, + "EstimatedNewCommitmentUtilization":{ + "shape":"GenericString", + "documentation":"The estimated utilization for the recommended Savings Plan.
" + } + }, + "documentation":"Contains the hourly metrics for the given recommendation over the lookback period.
" + }, + "RecommendationDetailId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[\\S\\s]{8}-[\\S\\s]{4}-[\\S\\s]{4}-[\\S\\s]{4}-[\\S\\s]{12}$" + }, "RecommendationId":{ "type":"string", "max":36, @@ -4096,6 +4284,10 @@ "CurrentAverageHourlyOnDemandSpend":{ "shape":"GenericString", "documentation":"The average value of hourly On-Demand spend over the lookback period of the applicable usage type.
" + }, + "RecommendationDetailId":{ + "shape":"RecommendationDetailId", + "documentation":"The ID that is associated with the Savings Plan recommendation.
" } }, "documentation":"Details for your recommended Savings Plans.
" @@ -4645,7 +4837,7 @@ }, "Threshold":{ "shape":"NullableNonNegativeDouble", - "documentation":"(deprecated)
The update to the threshold value for receiving notifications.
This field has been deprecated. To update a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.
", + "documentation":"(deprecated)
The update to the threshold value for receiving notifications.
This field has been deprecated. To update a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.
You can specify either Threshold or ThresholdExpression, but not both.
", "deprecated":true, "deprecatedMessage":"Threshold has been deprecated in favor of ThresholdExpression" }, @@ -4667,7 +4859,7 @@ }, "ThresholdExpression":{ "shape":"Expression", - "documentation":"The update to the Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000.
The following are examples of valid ThresholdExpressions:
Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }
Percentage threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }
AND two thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }
OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }
The update to the Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE, corresponding to an anomaly’s TotalImpact and TotalImpactPercentage, respectively (see Impact for more details). The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000 in string format.
You can specify either Threshold or ThresholdExpression, but not both.
The following are examples of valid ThresholdExpressions:
Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }
Percentage threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }
AND two thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }
OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }
Specifies your data quality evaluation criteria. Allows multiple input data and returns a collection of Dynamic Frames.
" + }, + "Recipe":{ + "shape":"Recipe", + "documentation":"Specifies a Glue DataBrew recipe node.
" } }, "documentation":" CodeGenConfigurationNode enumerates all valid Node types. One and only one of its member variables can be populated.
The name of the Glue Studio node.
" + }, + "Inputs":{ + "shape":"OneInput", + "documentation":"The nodes that are inputs to the recipe node, identified by id.
" + }, + "RecipeReference":{ + "shape":"RecipeReference", + "documentation":"A reference to the DataBrew recipe used by the node.
" + } + }, + "documentation":"A Glue Studio node that uses a Glue DataBrew recipe in Glue jobs.
" + }, + "RecipeReference":{ + "type":"structure", + "required":[ + "RecipeArn", + "RecipeVersion" + ], + "members":{ + "RecipeArn":{ + "shape":"EnclosedInStringProperty", + "documentation":"The ARN of the DataBrew recipe.
" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"The RecipeVersion of the DataBrew recipe.
" + } + }, + "documentation":"A reference to a Glue DataBrew recipe.
" + }, + "RecipeVersion":{ + "type":"string", + "max":16, + "min":1 + }, "RecordsCount":{ "type":"long", "box":true From a2a4b4d77566adccb7ddd7f715f5a62864e43bc4 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 24 Jul 2023 18:16:46 +0000 Subject: [PATCH 012/270] Amazon Elastic Compute Cloud Update: Add "disabled" enum value to SpotInstanceState. --- .../feature-AmazonElasticComputeCloud-93ef22d.json | 6 ++++++ .../ec2/src/main/resources/codegen-resources/service-2.json | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 .changes/next-release/feature-AmazonElasticComputeCloud-93ef22d.json diff --git a/.changes/next-release/feature-AmazonElasticComputeCloud-93ef22d.json b/.changes/next-release/feature-AmazonElasticComputeCloud-93ef22d.json new file mode 100644 index 000000000000..45b928d91b24 --- /dev/null +++ b/.changes/next-release/feature-AmazonElasticComputeCloud-93ef22d.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Add \"disabled\" enum value to SpotInstanceState." +} diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 6c6b1eade17b..ce2907650a2c 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -51477,7 +51477,8 @@ "active", "closed", "cancelled", - "failed" + "failed", + "disabled" ] }, "SpotInstanceStateFault":{ From d8a20f1c2d1ec1ab33b00ae89972daad21070be5 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 24 Jul 2023 18:17:07 +0000 Subject: [PATCH 013/270] AWS CloudFormation Update: This release supports filtering by DRIFT_STATUS for existing API ListStackInstances and adds support for a new API ListStackInstanceResourceDrifts. Customers can now view resource drift information from their StackSet management accounts. 
--- .../feature-AWSCloudFormation-9f2265f.json | 6 + .../codegen-resources/endpoint-tests.json | 206 +++++++++--------- .../codegen-resources/service-2.json | 172 +++++++++++++-- 3 files changed, 257 insertions(+), 127 deletions(-) create mode 100644 .changes/next-release/feature-AWSCloudFormation-9f2265f.json diff --git a/.changes/next-release/feature-AWSCloudFormation-9f2265f.json b/.changes/next-release/feature-AWSCloudFormation-9f2265f.json new file mode 100644 index 000000000000..d2ab8cb7fbae --- /dev/null +++ b/.changes/next-release/feature-AWSCloudFormation-9f2265f.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "This release supports filtering by DRIFT_STATUS for existing API ListStackInstances and adds support for a new API ListStackInstanceResourceDrifts. Customers can now view resource drift information from their StackSet management accounts." +} diff --git a/services/cloudformation/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudformation/src/main/resources/codegen-resources/endpoint-tests.json index b20e6d8f1e7a..dcd0159a3812 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudformation/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "Region": "af-south-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "af-south-1" } }, { @@ -21,9 +21,9 @@ } }, "params": { - "Region": "ap-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "ap-east-1" } }, { @@ -34,9 +34,9 @@ } }, "params": { - "Region": "ap-northeast-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "ap-northeast-1" } }, { @@ -47,9 +47,9 @@ } }, "params": { - "Region": "ap-northeast-2", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "ap-northeast-2" } }, { @@ -60,9 +60,9 
@@ } }, "params": { - "Region": "ap-northeast-3", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "ap-northeast-3" } }, { @@ -73,9 +73,9 @@ } }, "params": { - "Region": "ap-south-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "ap-south-1" } }, { @@ -86,9 +86,9 @@ } }, "params": { - "Region": "ap-southeast-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "ap-southeast-1" } }, { @@ -99,9 +99,9 @@ } }, "params": { - "Region": "ap-southeast-2", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "ap-southeast-2" } }, { @@ -112,9 +112,9 @@ } }, "params": { - "Region": "ap-southeast-3", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "ap-southeast-3" } }, { @@ -125,9 +125,9 @@ } }, "params": { - "Region": "ca-central-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "ca-central-1" } }, { @@ -138,9 +138,9 @@ } }, "params": { - "Region": "eu-central-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "eu-central-1" } }, { @@ -151,9 +151,9 @@ } }, "params": { - "Region": "eu-north-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "eu-north-1" } }, { @@ -164,9 +164,9 @@ } }, "params": { - "Region": "eu-south-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "eu-south-1" } }, { @@ -177,9 +177,9 @@ } }, "params": { - "Region": "eu-west-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "eu-west-1" } }, { @@ -190,9 +190,9 @@ } }, "params": { - "Region": "eu-west-2", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "eu-west-2" } }, { @@ -203,9 +203,9 @@ } }, "params": { - "Region": "eu-west-3", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "eu-west-3" } }, { @@ -216,9 +216,9 @@ } }, "params": { - 
"Region": "me-south-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "me-south-1" } }, { @@ -229,9 +229,9 @@ } }, "params": { - "Region": "sa-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "sa-east-1" } }, { @@ -242,9 +242,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -255,9 +255,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -268,9 +268,9 @@ } }, "params": { - "Region": "us-east-2", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-east-2" } }, { @@ -281,9 +281,9 @@ } }, "params": { - "Region": "us-east-2", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-east-2" } }, { @@ -294,9 +294,9 @@ } }, "params": { - "Region": "us-west-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-west-1" } }, { @@ -307,9 +307,9 @@ } }, "params": { - "Region": "us-west-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-west-1" } }, { @@ -320,9 +320,9 @@ } }, "params": { - "Region": "us-west-2", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-west-2" } }, { @@ -333,9 +333,9 @@ } }, "params": { - "Region": "us-west-2", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-west-2" } }, { @@ -346,9 +346,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -359,9 +359,9 @@ } }, "params": { - "Region": "us-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -372,9 +372,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": 
false + "Region": "cn-north-1" } }, { @@ -385,9 +385,9 @@ } }, "params": { - "Region": "cn-northwest-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "cn-northwest-1" } }, { @@ -398,9 +398,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -411,9 +411,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "cn-north-1" } }, { @@ -424,9 +424,9 @@ } }, "params": { - "Region": "cn-north-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -437,9 +437,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -450,9 +450,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -463,9 +463,9 @@ } }, "params": { - "Region": "us-gov-west-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-gov-west-1" } }, { @@ -476,9 +476,9 @@ } }, "params": { - "Region": "us-gov-west-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-gov-west-1" } }, { @@ -489,9 +489,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -502,9 +502,9 @@ } }, "params": { - "Region": "us-gov-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -515,9 +515,9 @@ } }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-iso-east-1" } }, { @@ -528,9 +528,9 @@ } }, "params": { - "Region": "us-iso-west-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + 
"Region": "us-iso-west-1" } }, { @@ -539,9 +539,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-iso-east-1" } }, { @@ -552,9 +552,9 @@ } }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-iso-east-1" } }, { @@ -563,9 +563,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-iso-east-1" } }, { @@ -576,9 +576,9 @@ } }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": false, "UseFIPS": false, - "UseDualStack": false + "Region": "us-isob-east-1" } }, { @@ -587,9 +587,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": true, "UseFIPS": true, - "UseDualStack": true + "Region": "us-isob-east-1" } }, { @@ -600,9 +600,9 @@ } }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": false, "UseFIPS": true, - "UseDualStack": false + "Region": "us-isob-east-1" } }, { @@ -611,9 +611,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-isob-east-1", + "UseDualStack": true, "UseFIPS": false, - "UseDualStack": true + "Region": "us-isob-east-1" } }, { @@ -624,9 +624,9 @@ } }, "params": { - "Region": "us-east-1", - "UseFIPS": false, "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -638,8 +638,8 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -649,9 +649,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", 
- "UseFIPS": true, "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -661,9 +661,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", - "UseFIPS": false, "UseDualStack": true, + "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index 9a7cd0ce3048..1fc35f8e0e71 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -709,6 +709,24 @@ }, "documentation":"Lists all stacks that are importing an exported output value. To modify or remove an exported output value, first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports.
For more information about importing an exported output value, see the Fn::ImportValue function.
" }, + "ListStackInstanceResourceDrifts":{ + "name":"ListStackInstanceResourceDrifts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackInstanceResourceDriftsInput"}, + "output":{ + "shape":"ListStackInstanceResourceDriftsOutput", + "resultWrapper":"ListStackInstanceResourceDriftsResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"StackInstanceNotFoundException"}, + {"shape":"OperationNotFoundException"} + ], + "documentation":"Returns drift information for resources in a stack instance.
ListStackInstanceResourceDrifts returns drift information for the most recent drift detection operation. If an operation is in progress, it may only return partial results.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM resources in CloudFormation templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
This capability doesn't apply to creating change sets, and specifying it when creating change sets has no effect.
If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.
For more information about macros, see Using CloudFormation macros to perform custom processing on templates.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM resources in CloudFormation templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
This capability doesn't apply to creating change sets, and specifying it when creating change sets has no effect.
If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.
For more information about macros, see Using CloudFormation macros to perform custom processing on templates.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.
You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.
Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.
For more information, see Using CloudFormation macros to perform custom processing on templates.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.
You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.
Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.
For more information, see Using CloudFormation macros to perform custom processing on templates.
Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default.
For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.
" + "documentation":"Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default.
For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.
" } }, "documentation":"The input for CreateStack action.
" @@ -1933,7 +1951,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.
Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.
Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.
" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", @@ -3541,6 +3559,62 @@ } } }, + "ListStackInstanceResourceDriftsInput":{ + "type":"structure", + "required":[ + "StackSetName", + "StackInstanceAccount", + "StackInstanceRegion", + "OperationId" + ], + "members":{ + "StackSetName":{ + "shape":"StackSetNameOrId", + "documentation":"The name or unique ID of the stack set that you want to list drifted resources for.
" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.
The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.
The resource drift status of the stack instance.
DELETED: The resource differs from its expected template configuration in that the resource has been deleted.
MODIFIED: One or more resource properties differ from their expected template values.
IN_SYNC: The resource's actual configuration matches its expected template configuration.
NOT_CHECKED: CloudFormation doesn't currently return this value.
The name of the Amazon Web Services account that you want to list resource drifts for.
" + }, + "StackInstanceRegion":{ + "shape":"Region", + "documentation":"The name of the Region where you want to list resource drifts.
" + }, + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"The unique ID of the drift operation.
" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.
By default, SELF is specified. Use SELF for stack sets with self-managed permissions.
If you are signed in to the management account, specify SELF.
If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.
Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.
A list of StackInstanceResourceDriftSummary structures that contain information about the specified stack instances.
If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.
The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.
The default is 0 minutes.
If you specify a monitoring period but don't specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack, for example) as necessary.
If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.
" } }, - "documentation":"Structure containing the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.
Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.
" + "documentation":"Structure containing the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.
Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.
" }, "RollbackStackInput":{ "type":"structure", @@ -5076,19 +5150,19 @@ }, "EnableTerminationProtection":{ "shape":"EnableTerminationProtection", - "documentation":"Whether termination protection is enabled for the stack.
For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide.
" + "documentation":"Whether termination protection is enabled for the stack.
For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide.
" }, "ParentId":{ "shape":"StackId", - "documentation":"For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.
For more information, see Working with Nested Stacks in the CloudFormation User Guide.
" + "documentation":"For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.
For more information, see Working with Nested Stacks in the CloudFormation User Guide.
" }, "RootId":{ "shape":"StackId", - "documentation":"For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.
For more information, see Working with Nested Stacks in the CloudFormation User Guide.
" + "documentation":"For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.
For more information, see Working with Nested Stacks in the CloudFormation User Guide.
" }, "DriftInformation":{ "shape":"StackDriftInformation", - "documentation":"Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" + "documentation":"Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" } }, "documentation":"The Stack data type.
" @@ -5331,7 +5405,8 @@ "type":"string", "enum":[ "DETAILED_STATUS", - "LAST_OPERATION_ID" + "LAST_OPERATION_ID", + "DRIFT_STATUS" ] }, "StackInstanceFilterValues":{ @@ -5343,7 +5418,7 @@ "StackInstanceFilters":{ "type":"list", "member":{"shape":"StackInstanceFilter"}, - "max":2 + "max":3 }, "StackInstanceNotFoundException":{ "type":"structure", @@ -5357,6 +5432,55 @@ }, "exception":true }, + "StackInstanceResourceDriftsSummaries":{ + "type":"list", + "member":{"shape":"StackInstanceResourceDriftsSummary"} + }, + "StackInstanceResourceDriftsSummary":{ + "type":"structure", + "required":[ + "StackId", + "LogicalResourceId", + "ResourceType", + "StackResourceDriftStatus", + "Timestamp" + ], + "members":{ + "StackId":{ + "shape":"StackId", + "documentation":"The ID of the stack instance.
" + }, + "LogicalResourceId":{ + "shape":"LogicalResourceId", + "documentation":"The logical name of the resource specified in the template.
" + }, + "PhysicalResourceId":{ + "shape":"PhysicalResourceId", + "documentation":"The name or unique identifier that corresponds to a physical instance ID of a resource supported by CloudFormation.
" + }, + "PhysicalResourceIdContext":{ + "shape":"PhysicalResourceIdContext", + "documentation":"Context information that enables CloudFormation to uniquely identify a resource. CloudFormation uses context key-value pairs in cases where a resource's logical and physical IDs aren't enough to uniquely identify that resource. Each context key-value pair specifies a unique resource that contains the targeted resource.
" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"Type of resource. For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.
" + }, + "PropertyDifferences":{ + "shape":"PropertyDifferences", + "documentation":"Status of the actual configuration of the resource compared to its expected configuration. These will be present only for resources whose StackInstanceResourceDriftStatus is MODIFIED.
The drift status of the resource in a stack instance.
DELETED: The resource differs from its expected template configuration in that the resource has been deleted.
MODIFIED: One or more resource properties differ from their expected template values.
IN_SYNC: The resource's actual configuration matches its expected template configuration.
NOT_CHECKED: CloudFormation doesn't currently return this value.
Time at which the stack instance drift detection operation was initiated.
" + } + }, + "documentation":"The structure containing summary information about resource drifts for a stack instance.
" + }, "StackInstanceStatus":{ "type":"string", "enum":[ @@ -5504,7 +5628,7 @@ }, "DriftInformation":{ "shape":"StackResourceDriftInformation", - "documentation":"Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" + "documentation":"Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" }, "ModuleInfo":{ "shape":"ModuleInfo", @@ -5564,7 +5688,7 @@ }, "DriftInformation":{ "shape":"StackResourceDriftInformation", - "documentation":"Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" + "documentation":"Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" }, "ModuleInfo":{ "shape":"ModuleInfo", @@ -5628,7 +5752,7 @@ "documentation":"Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.
" } }, - "documentation":"Contains the drift information for a resource that has been checked for drift. This includes actual and expected property values for resources in which CloudFormation has detected drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.
Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.
" + "documentation":"Contains the drift information for a resource that has been checked for drift. This includes actual and expected property values for resources in which CloudFormation has detected drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.
Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.
" }, "StackResourceDriftInformation":{ "type":"structure", @@ -5718,7 +5842,7 @@ }, "DriftInformation":{ "shape":"StackResourceDriftInformationSummary", - "documentation":"Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" + "documentation":"Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" }, "ModuleInfo":{ "shape":"ModuleInfo", @@ -5772,7 +5896,7 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"The Amazon Resource Name (ARN) of the IAM role used to create or update the stack set.
Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM role used to create or update the stack set.
Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.
" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", @@ -5921,7 +6045,7 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"The Amazon Resource Name (ARN) of the IAM role used to perform this stack set operation.
Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the CloudFormation User Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM role used to perform this stack set operation.
Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the CloudFormation User Guide.
" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", @@ -6233,15 +6357,15 @@ }, "ParentId":{ "shape":"StackId", - "documentation":"For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.
For more information, see Working with Nested Stacks in the CloudFormation User Guide.
" + "documentation":"For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.
For more information, see Working with Nested Stacks in the CloudFormation User Guide.
" }, "RootId":{ "shape":"StackId", - "documentation":"For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.
For more information, see Working with Nested Stacks in the CloudFormation User Guide.
" + "documentation":"For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.
For more information, see Working with Nested Stacks in the CloudFormation User Guide.
" }, "DriftInformation":{ "shape":"StackDriftInformationSummary", - "documentation":"Summarizes information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" + "documentation":"Summarizes information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" } }, "documentation":"The StackSummary Data Type
" @@ -6780,7 +6904,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.
You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.
Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.
For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.
You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.
Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.
For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks sets, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.
CAPABILITY_IAM and CAPABILITY_NAMED_IAM
Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks sets, you must explicitly acknowledge this by specifying one of these capabilities.
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
If you have IAM resources, you can specify either capability.
If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
For more information, see Acknowledging IAM Resources in CloudFormation Templates.
CAPABILITY_AUTO_EXPAND
Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.
Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
The Amazon Resource Name (ARN) of the IAM role to use to update this stack set.
Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack Set Operations in the CloudFormation User Guide.
If you specified a customized administrator role when you created the stack set, you must specify a customized administrator role, even if it is the same customized administrator role used with this stack set previously.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM role to use to update this stack set.
Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack Set Operations in the CloudFormation User Guide.
If you specified a customized administrator role when you created the stack set, you must specify a customized administrator role, even if it is the same customized administrator role used with this stack set previously.
" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", From be5bb59eba25cf8eba5b7d23114643bfcbb36e44 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 24 Jul 2023 18:17:15 +0000 Subject: [PATCH 014/270] AmazonApiGatewayV2 Update: Documentation updates for Amazon API Gateway. --- .../feature-AmazonApiGatewayV2-5412cd3.json | 6 + .../codegen-resources/endpoint-rule-set.json | 399 +++-- .../codegen-resources/endpoint-tests.json | 1591 +++-------------- .../codegen-resources/service-2.json | 14 +- 4 files changed, 450 insertions(+), 1560 deletions(-) create mode 100644 .changes/next-release/feature-AmazonApiGatewayV2-5412cd3.json diff --git a/.changes/next-release/feature-AmazonApiGatewayV2-5412cd3.json b/.changes/next-release/feature-AmazonApiGatewayV2-5412cd3.json new file mode 100644 index 000000000000..fe02946d89ca --- /dev/null +++ b/.changes/next-release/feature-AmazonApiGatewayV2-5412cd3.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AmazonApiGatewayV2", + "contributor": "", + "description": "Documentation updates for Amazon API Gateway." 
+} diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 943cb9b58ac7..620ab96f2a3e 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, 
- { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apigateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://apigateway-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://apigateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apigateway.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://apigateway-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://apigateway.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { 
- "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://apigateway.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://apigateway.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-tests.json b/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-tests.json index 6d936f9ae2e3..3d5b86aed1f7 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,146 +1,146 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-south-2.api.aws" + "url": "https://apigateway.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": true + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS enabled 
and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-south-2.amazonaws.com" + "url": "https://apigateway.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": true + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-south-2.api.aws" + "url": "https://apigateway.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-south-2.amazonaws.com" + "url": "https://apigateway.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-south-1.api.aws" + "url": "https://apigateway.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled 
and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-south-1.amazonaws.com" + "url": "https://apigateway.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, "Region": "ap-south-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-south-1.api.aws" + "url": "https://apigateway.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-south-1.amazonaws.com" + "url": "https://apigateway.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-south-1.api.aws" + "url": "https://apigateway.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-south-1.amazonaws.com" + 
"url": "https://apigateway.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-south-1.api.aws" + "url": "https://apigateway.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -151,1574 +151,417 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-south-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-south-2.api.aws" + "url": "https://apigateway.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-2", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-south-2.amazonaws.com" + "url": "https://apigateway.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://apigateway.eu-south-2.api.aws" + "url": "https://apigateway.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-2", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-south-2.amazonaws.com" + "url": "https://apigateway.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": false + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-gov-east-1.api.aws" + "url": "https://apigateway.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-gov-east-1.amazonaws.com" + "url": "https://apigateway.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-gov-east-1.api.aws" + "url": "https://apigateway.us-east-2.amazonaws.com" } }, 
"params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-gov-east-1.amazonaws.com" + "url": "https://apigateway.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.me-central-1.api.aws" + "url": "https://apigateway.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.me-central-1.amazonaws.com" + "url": "https://apigateway-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.me-central-1.api.aws" + "url": "https://apigateway-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false + "Region": 
"us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway.me-central-1.amazonaws.com" + "url": "https://apigateway.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ca-central-1.api.aws" + "url": "https://apigateway.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ca-central-1.amazonaws.com" + "url": "https://apigateway.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway.ca-central-1.api.aws" + "url": "https://apigateway-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - 
"documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ca-central-1.amazonaws.com" + "url": "https://apigateway-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-central-1.api.aws" + "url": "https://apigateway.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-central-1.amazonaws.com" + "url": "https://apigateway.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-central-1.api.aws" + "url": "https://apigateway.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled 
and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-central-1.amazonaws.com" + "url": "https://apigateway-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-central-2.api.aws" + "url": "https://apigateway-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-2", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-central-2.amazonaws.com" + "url": "https://apigateway.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-2", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-central-2.api.aws" + "url": "https://apigateway.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with 
FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "eu-central-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-west-1.api.aws" + "url": "https://apigateway-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-1.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway.us-west-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 
with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-west-1.amazonaws.com" + "url": "https://apigateway-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-west-2.amazonaws.com" + "url": "https://apigateway.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-west-2.api.aws" + "url": "https://example.com" } }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - 
"documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack 
disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-2.api.aws" - } 
- }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": 
"ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": 
"ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } 
- }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - 
"expect": { - "endpoint": { - "url": "https://apigateway.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://apigateway.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", - 
"expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-4.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-4", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-4.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-4", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-4.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-4", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS disabled 
and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-4.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-4", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://apigateway.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": true, - "Region": "us-isob-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", 
- "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-isob-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseDualStack": true, - "Region": "us-isob-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { + "UseFIPS": false, "UseDualStack": false, - "Region": "us-isob-east-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1728,9 +571,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1740,11 +583,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json 
b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json index 73b4de0df2a6..367f45df1af1 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json @@ -585,7 +585,7 @@ "shape" : "TooManyRequestsException", "documentation" : "The client is sending more than the allowed number of requests per unit of time.
" } ], - "documentation" : "Deletes a route request parameter.
" + "documentation" : "Deletes a route request parameter. Supported only for WebSocket APIs.
" }, "DeleteRouteResponse" : { "name" : "DeleteRouteResponse", @@ -3089,7 +3089,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API itegrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" + "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -5047,7 +5047,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API itegrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" + "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -6033,7 +6033,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API itegrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" + "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -7615,7 +7615,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to the backend. The key should follow the pattern <action>:<header|querystring|path>.<location> where action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" + "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to the backend. The key should follow the pattern <action>:<header|querystring|path>.<location> where action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -7718,7 +7718,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to the backend. The key should follow the pattern <action>:<header|querystring|path>.<location> where action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" + "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to the backend. The key should follow the pattern <action>:<header|querystring|path>.<location> where action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -7825,7 +7825,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API itegrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" + "documentation" : "For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.
For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.
For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.
" }, "RequestTemplates" : { "shape" : "TemplateMap", From b7a62c8af9bb84621d49fb0c7ac9f49c5177ce72 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 24 Jul 2023 18:18:10 +0000 Subject: [PATCH 015/270] Updated endpoints.json and partitions.json. --- .changes/next-release/feature-AWSSDKforJavav2-0443982.json | 6 ++++++ .../amazon/awssdk/regions/internal/region/endpoints.json | 4 ++++ 2 files changed, 10 insertions(+) create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json new file mode 100644 index 000000000000..e5b5ee3ca5e3 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." +} diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index a5cf935e9d5c..330334797585 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -1461,16 +1461,20 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, From 101d304b7cdfd5b65317948b1027aa5de2e5e6d9 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 24 Jul 2023 18:19:11 +0000 Subject: [PATCH 016/270] Release 2.20.110. 
Updated CHANGELOG.md, README.md and all pom.xml. --- .changes/2.20.110.json | 54 +++++++++++++++++++ .../feature-AWSCloudFormation-9f2265f.json | 6 --- ...eature-AWSCostExplorerService-21849f3.json | 6 --- .../next-release/feature-AWSGlue-8107ac3.json | 6 --- .../feature-AWSSDKforJavav2-0443982.json | 6 --- .../feature-AmazonApiGatewayV2-5412cd3.json | 6 --- ...-AmazonChimeSDKMediaPipelines-673d1ee.json | 6 --- ...ure-AmazonElasticComputeCloud-93ef22d.json | 6 --- .../feature-AmazonQuickSight-e8a6315.json | 6 --- CHANGELOG.md | 33 ++++++++++++ README.md | 8 +-- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 2 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- 
services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- 
services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- 
services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 +- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml 
| 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- 
services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 2 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- 
services/rekognition/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 
+- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- 425 files changed, 505 insertions(+), 466 deletions(-) create mode 100644 .changes/2.20.110.json delete mode 100644 .changes/next-release/feature-AWSCloudFormation-9f2265f.json delete mode 100644 .changes/next-release/feature-AWSCostExplorerService-21849f3.json delete mode 100644 .changes/next-release/feature-AWSGlue-8107ac3.json delete mode 100644 
.changes/next-release/feature-AWSSDKforJavav2-0443982.json delete mode 100644 .changes/next-release/feature-AmazonApiGatewayV2-5412cd3.json delete mode 100644 .changes/next-release/feature-AmazonChimeSDKMediaPipelines-673d1ee.json delete mode 100644 .changes/next-release/feature-AmazonElasticComputeCloud-93ef22d.json delete mode 100644 .changes/next-release/feature-AmazonQuickSight-e8a6315.json diff --git a/.changes/2.20.110.json b/.changes/2.20.110.json new file mode 100644 index 000000000000..5861938b3401 --- /dev/null +++ b/.changes/2.20.110.json @@ -0,0 +1,54 @@ +{ + "version": "2.20.110", + "date": "2023-07-24", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "This release supports filtering by DRIFT_STATUS for existing API ListStackInstances and adds support for a new API ListStackInstanceResourceDrifts. Customers can now view resource drift information from their StackSet management accounts." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "This release introduces the new API 'GetSavingsPlanPurchaseRecommendationDetails', which retrieves the details for a Savings Plan recommendation. It also updates the existing API 'GetSavingsPlansPurchaseRecommendation' to include the recommendation detail ID." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Added support for Data Preparation Recipe node in Glue Studio jobs" + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "contributor": "", + "description": "Documentation updates for Amazon API Gateway." + }, + { + "type": "feature", + "category": "Amazon Chime SDK Media Pipelines", + "contributor": "", + "description": "AWS Media Pipeline compositing enhancement and Media Insights Pipeline auto language identification." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Add \"disabled\" enum value to SpotInstanceState." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "This release launches new Snapshot APIs for CSV and PDF exports, adds support for info icon for filters and parameters in Exploration APIs, adds modeled exception to the DeleteAccountCustomization API, and introduces AttributeAggregationFunction's ability to add UNIQUE_VALUE aggregation in tooltips." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AWSCloudFormation-9f2265f.json b/.changes/next-release/feature-AWSCloudFormation-9f2265f.json deleted file mode 100644 index d2ab8cb7fbae..000000000000 --- a/.changes/next-release/feature-AWSCloudFormation-9f2265f.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS CloudFormation", - "contributor": "", - "description": "This release supports filtering by DRIFT_STATUS for existing API ListStackInstances and adds support for a new API ListStackInstanceResourceDrifts. Customers can now view resource drift information from their StackSet management accounts." -} diff --git a/.changes/next-release/feature-AWSCostExplorerService-21849f3.json b/.changes/next-release/feature-AWSCostExplorerService-21849f3.json deleted file mode 100644 index ce2d2f445cc1..000000000000 --- a/.changes/next-release/feature-AWSCostExplorerService-21849f3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Cost Explorer Service", - "contributor": "", - "description": "This release introduces the new API 'GetSavingsPlanPurchaseRecommendationDetails', which retrieves the details for a Savings Plan recommendation. 
It also updates the existing API 'GetSavingsPlansPurchaseRecommendation' to include the recommendation detail ID." -} diff --git a/.changes/next-release/feature-AWSGlue-8107ac3.json b/.changes/next-release/feature-AWSGlue-8107ac3.json deleted file mode 100644 index 59a69b78fe9d..000000000000 --- a/.changes/next-release/feature-AWSGlue-8107ac3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Glue", - "contributor": "", - "description": "Added support for Data Preparation Recipe node in Glue Studio jobs" -} diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json deleted file mode 100644 index e5b5ee3ca5e3..000000000000 --- a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS SDK for Java v2", - "contributor": "", - "description": "Updated endpoint and partition metadata." -} diff --git a/.changes/next-release/feature-AmazonApiGatewayV2-5412cd3.json b/.changes/next-release/feature-AmazonApiGatewayV2-5412cd3.json deleted file mode 100644 index fe02946d89ca..000000000000 --- a/.changes/next-release/feature-AmazonApiGatewayV2-5412cd3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AmazonApiGatewayV2", - "contributor": "", - "description": "Documentation updates for Amazon API Gateway." -} diff --git a/.changes/next-release/feature-AmazonChimeSDKMediaPipelines-673d1ee.json b/.changes/next-release/feature-AmazonChimeSDKMediaPipelines-673d1ee.json deleted file mode 100644 index c9322159a4af..000000000000 --- a/.changes/next-release/feature-AmazonChimeSDKMediaPipelines-673d1ee.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Chime SDK Media Pipelines", - "contributor": "", - "description": "AWS Media Pipeline compositing enhancement and Media Insights Pipeline auto language identification." 
-} diff --git a/.changes/next-release/feature-AmazonElasticComputeCloud-93ef22d.json b/.changes/next-release/feature-AmazonElasticComputeCloud-93ef22d.json deleted file mode 100644 index 45b928d91b24..000000000000 --- a/.changes/next-release/feature-AmazonElasticComputeCloud-93ef22d.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Elastic Compute Cloud", - "contributor": "", - "description": "Add \"disabled\" enum value to SpotInstanceState." -} diff --git a/.changes/next-release/feature-AmazonQuickSight-e8a6315.json b/.changes/next-release/feature-AmazonQuickSight-e8a6315.json deleted file mode 100644 index 926acdb4c129..000000000000 --- a/.changes/next-release/feature-AmazonQuickSight-e8a6315.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon QuickSight", - "contributor": "", - "description": "This release launches new Snapshot APIs for CSV and PDF exports, adds support for info icon for filters and parameters in Exploration APIs, adds modeled exception to the DeleteAccountCustomization API, and introduces AttributeAggregationFunction's ability to add UNIQUE_VALUE aggregation in tooltips." -} diff --git a/CHANGELOG.md b/CHANGELOG.md index 93a8806f1983..0839e85ecda1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,36 @@ +# __2.20.110__ __2023-07-24__ +## __AWS CloudFormation__ + - ### Features + - This release supports filtering by DRIFT_STATUS for existing API ListStackInstances and adds support for a new API ListStackInstanceResourceDrifts. Customers can now view resource drift information from their StackSet management accounts. + +## __AWS Cost Explorer Service__ + - ### Features + - This release introduces the new API 'GetSavingsPlanPurchaseRecommendationDetails', which retrieves the details for a Savings Plan recommendation. It also updates the existing API 'GetSavingsPlansPurchaseRecommendation' to include the recommendation detail ID. 
+ +## __AWS Glue__ + - ### Features + - Added support for Data Preparation Recipe node in Glue Studio jobs + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Chime SDK Media Pipelines__ + - ### Features + - AWS Media Pipeline compositing enhancement and Media Insights Pipeline auto language identification. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Add "disabled" enum value to SpotInstanceState. + +## __Amazon QuickSight__ + - ### Features + - This release launches new Snapshot APIs for CSV and PDF exports, adds support for info icon for filters and parameters in Exploration APIs, adds modeled exception to the DeleteAccountCustomization API, and introduces AttributeAggregationFunction's ability to add UNIQUE_VALUE aggregation in tooltips. + +## __AmazonApiGatewayV2__ + - ### Features + - Documentation updates for Amazon API Gateway. + # __2.20.109__ __2023-07-21__ ## __AWS Elemental MediaConvert__ - ### Features diff --git a/README.md b/README.md index 547df2580026..a1bbdae7a5a2 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same verThe Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.
If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.
A single Scan operation reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.
Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.
The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.
If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to continue the scan in a subsequent operation. Each scan response also includes number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan result can result in no items meeting the criteria and the Count will result in zero. If you did not use a FilterExpression in the scan request, then Count is the same as ScannedCount.
Count and ScannedCount only return the count of items specific to a single scan request and, unless the table is less than 1MB, do not represent the total number of items in the table.
A single Scan operation first reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.
By default, a Scan uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead parameter to true. Strong consistency only relates to the consistency of the read at the item level.
DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested.
The response to each PartiQL statement in the batch.
" + "documentation":"The response to each PartiQL statement in the batch. The values of the list are ordered according to the ordering of the request statements.
" }, "ConsumedCapacity":{ "shape":"ConsumedCapacityMultiple", @@ -5938,7 +5938,7 @@ }, "ReturnValuesOnConditionCheckFailure":{ "shape":"ReturnValuesOnConditionCheckFailure", - "documentation":"Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW.
Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid values are: NONE and ALL_OLD.
Represents a request to perform an UpdateItem operation.
Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This operation applies only to Aurora DB clusters.
Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.
You can create a global database that is initially empty, and then create the primary and secondary DB clusters in the global database. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.
This operation applies only to Aurora DB clusters.
Modify a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
This action only applies to Aurora DB clusters.
Modifies a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
This operation only applies to Aurora global database clusters.
The identifier of the source DB cluster if this DB instance is a read replica.
" + }, + "PercentProgress":{ + "shape":"String", + "documentation":"The progress of the storage optimization operation as a percentage.
" } }, "documentation":"Contains the details of an Amazon RDS DB instance.
This data type is used as a response element in the operations CreateDBInstance, CreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance, PromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3, RestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance.
The CA certificate identifier to use for the DB instance6's server certificate.
This setting doesn't apply to RDS Custom DB instances.
For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
" + "documentation":"The CA certificate identifier to use for the DB instance's server certificate.
This setting doesn't apply to RDS Custom DB instances.
For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
" }, "Domain":{ "shape":"String", @@ -11067,23 +11071,23 @@ "members":{ "GlobalClusterIdentifier":{ "shape":"String", - "documentation":"The DB cluster identifier for the global cluster being modified. This parameter isn't case-sensitive.
Constraints:
Must match the identifier of an existing global database cluster.
The cluster identifier for the global cluster to modify. This parameter isn't case-sensitive.
Constraints:
Must match the identifier of an existing global database cluster.
The new cluster identifier for the global database cluster when modifying a global database cluster. This value is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 letters, numbers, or hyphens
The first character must be a letter
Can't end with a hyphen or contain two consecutive hyphens
Example: my-cluster2
The new cluster identifier for the global database cluster. This value is stored as a lowercase string.
Constraints:
Must contain from 1 to 63 letters, numbers, or hyphens.
The first character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens.
Example: my-cluster2
Indicates if the global database cluster has deletion protection enabled. The global database cluster can't be deleted when deletion protection is enabled.
" + "documentation":"Specifies whether to enable deletion protection for the global database cluster. The global database cluster can't be deleted when deletion protection is enabled.
" }, "EngineVersion":{ "shape":"String", - "documentation":"The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled.
To list all of the available engine versions for aurora-mysql (for MySQL-based Aurora global databases), use the following command:
aws rds describe-db-engine-versions --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'
To list all of the available engine versions for aurora-postgresql (for PostgreSQL-based Aurora global databases), use the following command:
aws rds describe-db-engine-versions --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'
The version number of the database engine to which you want to upgrade.
To list all of the available engine versions for aurora-mysql (for MySQL-based Aurora global databases), use the following command:
aws rds describe-db-engine-versions --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'
To list all of the available engine versions for aurora-postgresql (for PostgreSQL-based Aurora global databases), use the following command:
aws rds describe-db-engine-versions --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'
A value that indicates whether major version upgrades are allowed.
Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version.
If you upgrade the major version of a global database, the cluster and DB instance parameter groups are set to the default parameter groups for the new version. Apply any custom parameter groups after completing the upgrade.
" + "documentation":"Specifies whether to allow major version upgrades.
Constraints: Must be enabled if you specify a value for the EngineVersion parameter that's a different major version than the global cluster's current version.
If you upgrade the major version of a global database, the cluster and DB instance parameter groups are set to the default parameter groups for the new version. Apply any custom parameter groups after completing the upgrade.
" } } }, From b63383668b728b0d7eb67603b507cf36ca409cb6 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:12:56 +0000 Subject: [PATCH 021/270] AWS Lambda Update: Add Python 3.11 (python3.11) support to AWS Lambda --- .changes/next-release/feature-AWSLambda-bad128e.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 .changes/next-release/feature-AWSLambda-bad128e.json diff --git a/.changes/next-release/feature-AWSLambda-bad128e.json b/.changes/next-release/feature-AWSLambda-bad128e.json new file mode 100644 index 000000000000..3451a21e296a --- /dev/null +++ b/.changes/next-release/feature-AWSLambda-bad128e.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Add Python 3.11 (python3.11) support to AWS Lambda" +} diff --git a/services/lambda/src/main/resources/codegen-resources/service-2.json b/services/lambda/src/main/resources/codegen-resources/service-2.json index cce6d7d736b0..2b93c96071f5 100644 --- a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -5229,7 +5229,8 @@ "nodejs18.x", "python3.10", "java17", - "ruby3.2" + "ruby3.2", + "python3.11" ] }, "RuntimeVersionArn":{ From 2329a0782c9c05d281ec9a06d995c6217e52d7fc Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:13:00 +0000 Subject: [PATCH 022/270] AWS SecurityHub Update: Add support for CONTAINS and NOT_CONTAINS comparison operators for Automation Rules string filters and map filters --- .../feature-AWSSecurityHub-599cc29.json | 6 + .../codegen-resources/service-2.json | 110 ++++++++++++++++-- 2 files changed, 104 insertions(+), 12 deletions(-) create mode 100644 .changes/next-release/feature-AWSSecurityHub-599cc29.json diff --git a/.changes/next-release/feature-AWSSecurityHub-599cc29.json 
b/.changes/next-release/feature-AWSSecurityHub-599cc29.json new file mode 100644 index 000000000000..1203c32734b4 --- /dev/null +++ b/.changes/next-release/feature-AWSSecurityHub-599cc29.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Add support for CONTAINS and NOT_CONTAINS comparison operators for Automation Rules string filters and map filters" +} diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json b/services/securityhub/src/main/resources/codegen-resources/service-2.json index b6f2a5044c17..db6338f2b77f 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json @@ -1504,7 +1504,7 @@ }, "IsTerminal":{ "shape":"Boolean", - "documentation":"Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding.
The default value of this field is false.
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.
" }, "Criteria":{ "shape":"AutomationRulesFindingFilters", @@ -1737,7 +1737,7 @@ }, "IsTerminal":{ "shape":"Boolean", - "documentation":" Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding.
The default value of this field is false.
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.
" }, "CreatedAt":{ "shape":"Timestamp", @@ -2544,6 +2544,62 @@ }, "documentation":"Specifies the authorization configuration for using Amazon Cognito user pools with your AppSync GraphQL API endpoint.
" }, + "AwsAthenaWorkGroupConfigurationDetails":{ + "type":"structure", + "members":{ + "ResultConfiguration":{ + "shape":"AwsAthenaWorkGroupConfigurationResultConfigurationDetails", + "documentation":"The location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query and calculation results. These are known as client-side settings. If workgroup settings override client-side settings, then the query uses the workgroup settings.
" + } + }, + "documentation":"The configuration of the workgroup, which includes the location in Amazon Simple Storage Service (Amazon S3) where query results are stored, the encryption option, if any, used for query results, whether Amazon CloudWatch metrics are enabled for the workgroup, and the limit for the amount of bytes scanned (cutoff) per query, if it is specified.
" + }, + "AwsAthenaWorkGroupConfigurationResultConfigurationDetails":{ + "type":"structure", + "members":{ + "EncryptionConfiguration":{ + "shape":"AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails", + "documentation":"Specifies the method used to encrypt the user’s data stores in the Athena workgroup.
" + } + }, + "documentation":"The location in Amazon Simple Storage Service (Amazon S3) where query and calculation results are stored and the encryption option, if any, used for query and calculation results. These are known as client-side settings. If workgroup settings override client-side settings, then the query uses the workgroup settings.
" + }, + "AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails":{ + "type":"structure", + "members":{ + "EncryptionOption":{ + "shape":"NonEmptyString", + "documentation":"Indicates whether Amazon Simple Storage Service (Amazon S3) server-side encryption with Amazon S3 managed keys (SSE_S3), server-side encryption with KMS keys (SSE_KMS), or client-side encryption with KMS customer managed keys (CSE_KMS) is used.
" + }, + "KmsKey":{ + "shape":"NonEmptyString", + "documentation":" For SSE_KMS and CSE_KMS, this is the KMS key Amazon Resource Name (ARN) or ID.
Specifies the method used to encrypt the user’s data stores in the Athena workgroup.
" + }, + "AwsAthenaWorkGroupDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"The workgroup name.
" + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"The workgroup description.
" + }, + "State":{ + "shape":"NonEmptyString", + "documentation":"Whether the workgroup is enabled or disabled.
" + }, + "Configuration":{ + "shape":"AwsAthenaWorkGroupConfigurationDetails", + "documentation":"The configuration of the workgroup, which includes the location in Amazon Simple Storage Service (Amazon S3) where query results are stored, the encryption option, if any, used for query results, whether Amazon CloudWatch metrics are enabled for the workgroup, and the limit for the amount of bytes scanned (cutoff) per query, if it is specified.
" + } + }, + "documentation":"Provides information about an Amazon Athena workgroup.
" + }, "AwsAutoScalingAutoScalingGroupAvailabilityZonesList":{ "type":"list", "member":{"shape":"AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails"} @@ -10406,6 +10462,24 @@ "type":"list", "member":{"shape":"AwsRdsDbClusterOptionGroupMembership"} }, + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"NonEmptyString", + "documentation":" The name of the manual DB cluster snapshot attribute. The attribute named restore refers to the list of Amazon Web Services accounts that have permission to copy or restore the manual DB cluster snapshot.
The value(s) for the manual DB cluster snapshot attribute. If the AttributeName field is set to restore, then this element returns a list of IDs of the Amazon Web Services accounts that are authorized to copy or restore the manual DB cluster snapshot. If a value of all is in the list, then the manual DB cluster snapshot is public and available for any Amazon Web Services account to copy or restore.
Contains the name and values of a manual Amazon Relational Database Service (RDS) DB cluster snapshot attribute.
" + }, + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttributes":{ + "type":"list", + "member":{"shape":"AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute"} + }, "AwsRdsDbClusterSnapshotDetails":{ "type":"structure", "members":{ @@ -10480,6 +10554,10 @@ "IamDatabaseAuthenticationEnabled":{ "shape":"Boolean", "documentation":"Whether mapping of IAM accounts to database accounts is enabled.
" + }, + "DbClusterSnapshotAttributes":{ + "shape":"AwsRdsDbClusterSnapshotDbClusterSnapshotAttributes", + "documentation":"Contains the name and values of a manual DB cluster snapshot attribute.
" } }, "documentation":"Information about an Amazon RDS DB cluster snapshot.
" @@ -14712,7 +14790,7 @@ }, "IsTerminal":{ "shape":"Boolean", - "documentation":"Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. The default value of this field is false.
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.
" }, "Criteria":{ "shape":"AutomationRulesFindingFilters", @@ -16558,20 +16636,22 @@ }, "Value":{ "shape":"NonEmptyString", - "documentation":"The value for the key in the map filter. Filter values are case sensitive. For example, one of the values for a tag called Department might be Security. If you provide security as the filter value, then there is no match.
The value for the key in the map filter. Filter values are case sensitive. For example, one of the values for a tag called Department might be Security. If you provide security as the filter value, then there's no match.
The condition to apply to the key value when querying for findings with a map filter.
To search for values that exactly match the filter value, use EQUALS. For example, for the ResourceTags field, the filter Department EQUALS Security matches findings that have the value Security for the tag Department.
To search for values other than the filter value, use NOT_EQUALS. For example, for the ResourceTags field, the filter Department NOT_EQUALS Finance matches findings that do not have the value Finance for the tag Department.
EQUALS filters on the same field are joined by OR. A finding matches if it matches any one of those filters.
NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters.
You cannot have both an EQUALS filter and a NOT_EQUALS filter on the same field.
The condition to apply to the key value when filtering Security Hub findings with a map filter.
To search for values that have the filter value, use one of the following comparison operators:
To search for values that include the filter value, use CONTAINS. For example, for the ResourceTags field, the filter Department CONTAINS Security matches findings that include the value Security for the Department tag. In the same example, a finding with a value of Security team for the Department tag is a match.
To search for values that exactly match the filter value, use EQUALS. For example, for the ResourceTags field, the filter Department EQUALS Security matches findings that have the value Security for the Department tag.
CONTAINS and EQUALS filters on the same field are joined by OR. A finding matches if it matches any one of those filters. For example, the filters Department CONTAINS Security OR Department CONTAINS Finance match a finding that includes either Security, Finance, or both values.
To search for values that don't have the filter value, use one of the following comparison operators:
To search for values that exclude the filter value, use NOT_CONTAINS. For example, for the ResourceTags field, the filter Department NOT_CONTAINS Finance matches findings that exclude the value Finance for the Department tag.
To search for values other than the filter value, use NOT_EQUALS. For example, for the ResourceTags field, the filter Department NOT_EQUALS Finance matches findings that don’t have the value Finance for the Department tag.
NOT_CONTAINS and NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters. For example, the filters Department NOT_CONTAINS Security AND Department NOT_CONTAINS Finance match a finding that excludes both the Security and Finance values.
CONTAINS filters can only be used with other CONTAINS filters. NOT_CONTAINS filters can only be used with other NOT_CONTAINS filters.
You can’t have both a CONTAINS filter and a NOT_CONTAINS filter on the same field. Similarly, you can’t have both an EQUALS filter and a NOT_EQUALS filter on the same field. Combining filters in this way returns an error.
CONTAINS and NOT_CONTAINS operators can be used only with automation rules. For more information, see Automation rules in the Security Hub User Guide.
A map filter for querying findings. Each map filter provides the field to check, the value to look for, and the comparison operator.
" + "documentation":"A map filter for filtering Security Hub findings. Each map filter provides the field to check for, the value to check for, and the comparison operator.
" }, "MapFilterComparison":{ "type":"string", "enum":[ "EQUALS", - "NOT_EQUALS" + "NOT_EQUALS", + "CONTAINS", + "NOT_CONTAINS" ] }, "MapFilterList":{ @@ -17625,6 +17705,10 @@ "AwsStepFunctionStateMachine":{ "shape":"AwsStepFunctionStateMachineDetails", "documentation":"Provides details about an Step Functions state machine, which is a workflow consisting of a series of event-driven steps.
" + }, + "AwsAthenaWorkGroup":{ + "shape":"AwsAthenaWorkGroupDetails", + "documentation":"Provides information about an Amazon Athena workgroup. A workgroup helps you separate users, teams, applications, or workloads. It also helps you set limits on data processing and track costs.
" } }, "documentation":"Additional details about a resource related to a finding.
To provide the details, use the object that corresponds to the resource type. For example, if the resource type is AwsEc2Instance, then you use the AwsEc2Instance object to provide the details.
If the type-specific object does not contain all of the fields you want to populate, then you use the Other object to populate those additional fields.
You also use the Other object to populate the details when the selected type does not have a corresponding object.
The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is Security Hub. If you provide security hub as the filter text, then there is no match.
The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is Security Hub. If you provide security hub as the filter value, there's no match.
The condition to apply to a string value when querying for findings. To search for values that contain the filter criteria value, use one of the following comparison operators:
To search for values that exactly match the filter value, use EQUALS.
For example, the filter ResourceType EQUALS AwsEc2SecurityGroup only matches findings that have a resource type of AwsEc2SecurityGroup.
To search for values that start with the filter value, use PREFIX.
For example, the filter ResourceType PREFIX AwsIam matches findings that have a resource type that starts with AwsIam. Findings with a resource type of AwsIamPolicy, AwsIamRole, or AwsIamUser would all match.
EQUALS and PREFIX filters on the same field are joined by OR. A finding matches if it matches any one of those filters.
To search for values that do not contain the filter criteria value, use one of the following comparison operators:
To search for values that do not exactly match the filter value, use NOT_EQUALS.
For example, the filter ResourceType NOT_EQUALS AwsIamPolicy matches findings that have a resource type other than AwsIamPolicy.
To search for values that do not start with the filter value, use PREFIX_NOT_EQUALS.
For example, the filter ResourceType PREFIX_NOT_EQUALS AwsIam matches findings that have a resource type that does not start with AwsIam. Findings with a resource type of AwsIamPolicy, AwsIamRole, or AwsIamUser would all be excluded from the results.
NOT_EQUALS and PREFIX_NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters.
For filters on the same field, you cannot provide both an EQUALS filter and a NOT_EQUALS or PREFIX_NOT_EQUALS filter. Combining filters in this way always returns an error, even if the provided filter values would return valid results.
You can combine PREFIX filters with NOT_EQUALS or PREFIX_NOT_EQUALS filters for the same field. Security Hub first processes the PREFIX filters, then the NOT_EQUALS or PREFIX_NOT_EQUALS filters.
For example, for the following filter, Security Hub first identifies findings that have resource types that start with either AwsIAM or AwsEc2. It then excludes findings that have a resource type of AwsIamPolicy and findings that have a resource type of AwsEc2NetworkInterface.
ResourceType PREFIX AwsIam
ResourceType PREFIX AwsEc2
ResourceType NOT_EQUALS AwsIamPolicy
ResourceType NOT_EQUALS AwsEc2NetworkInterface
The condition to apply to a string value when filtering Security Hub findings.
To search for values that have the filter value, use one of the following comparison operators:
To search for values that include the filter value, use CONTAINS. For example, the filter Title CONTAINS CloudFront matches findings that have a Title that includes the string CloudFront.
To search for values that exactly match the filter value, use EQUALS. For example, the filter AwsAccountId EQUALS 123456789012 only matches findings that have an account ID of 123456789012.
To search for values that start with the filter value, use PREFIX. For example, the filter ResourceRegion PREFIX us matches findings that have a ResourceRegion that starts with us. A ResourceRegion that starts with a different value, such as af, ap, or ca, doesn't match.
CONTAINS, EQUALS, and PREFIX filters on the same field are joined by OR. A finding matches if it matches any one of those filters. For example, the filters Title CONTAINS CloudFront OR Title CONTAINS CloudWatch match a finding that includes either CloudFront, CloudWatch, or both strings in the title.
To search for values that don’t have the filter value, use one of the following comparison operators:
To search for values that exclude the filter value, use NOT_CONTAINS. For example, the filter Title NOT_CONTAINS CloudFront matches findings that have a Title that excludes the string CloudFront.
To search for values other than the filter value, use NOT_EQUALS. For example, the filter AwsAccountId NOT_EQUALS 123456789012 only matches findings that have an account ID other than 123456789012.
To search for values that don't start with the filter value, use PREFIX_NOT_EQUALS. For example, the filter ResourceRegion PREFIX_NOT_EQUALS us matches findings with a ResourceRegion that starts with a value other than us.
NOT_CONTAINS, NOT_EQUALS, and PREFIX_NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters. For example, the filters Title NOT_CONTAINS CloudFront AND Title NOT_CONTAINS CloudWatch match a finding that excludes both CloudFront and CloudWatch in the title.
You can’t have both a CONTAINS filter and a NOT_CONTAINS filter on the same field. Similarly, you can't provide both an EQUALS filter and a NOT_EQUALS or PREFIX_NOT_EQUALS filter on the same field. Combining filters in this way returns an error. CONTAINS filters can only be used with other CONTAINS filters. NOT_CONTAINS filters can only be used with other NOT_CONTAINS filters.
You can combine PREFIX filters with NOT_EQUALS or PREFIX_NOT_EQUALS filters for the same field. Security Hub first processes the PREFIX filters, and then the NOT_EQUALS or PREFIX_NOT_EQUALS filters.
For example, for the following filters, Security Hub first identifies findings that have resource types that start with either AwsIam or AwsEc2. It then excludes findings that have a resource type of AwsIamPolicy and findings that have a resource type of AwsEc2NetworkInterface.
ResourceType PREFIX AwsIam
ResourceType PREFIX AwsEc2
ResourceType NOT_EQUALS AwsIamPolicy
ResourceType NOT_EQUALS AwsEc2NetworkInterface
CONTAINS and NOT_CONTAINS operators can be used only with automation rules. For more information, see Automation rules in the Security Hub User Guide.
A string filter for querying findings.
" + "documentation":"A string filter for filtering Security Hub findings.
" }, "StringFilterComparison":{ "type":"string", @@ -18793,7 +18877,9 @@ "EQUALS", "PREFIX", "NOT_EQUALS", - "PREFIX_NOT_EQUALS" + "PREFIX_NOT_EQUALS", + "CONTAINS", + "NOT_CONTAINS" ] }, "StringFilterList":{ @@ -19132,7 +19218,7 @@ }, "IsTerminal":{ "shape":"Boolean", - "documentation":"Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding.
The default value of this field is false.
Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.
" }, "Criteria":{ "shape":"AutomationRulesFindingFilters", From 53c7e4d2a16b30111d0b4766562cd8551219f0a4 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:12:57 +0000 Subject: [PATCH 023/270] AWS DataSync Update: AWS DataSync now supports Microsoft Azure Blob Storage locations. --- .../feature-AWSDataSync-1cb444c.json | 6 + .../codegen-resources/service-2.json | 267 ++++++++++++++++-- 2 files changed, 249 insertions(+), 24 deletions(-) create mode 100644 .changes/next-release/feature-AWSDataSync-1cb444c.json diff --git a/.changes/next-release/feature-AWSDataSync-1cb444c.json b/.changes/next-release/feature-AWSDataSync-1cb444c.json new file mode 100644 index 000000000000..52cd67a7986a --- /dev/null +++ b/.changes/next-release/feature-AWSDataSync-1cb444c.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "AWS DataSync now supports Microsoft Azure Blob Storage locations." +} diff --git a/services/datasync/src/main/resources/codegen-resources/service-2.json b/services/datasync/src/main/resources/codegen-resources/service-2.json index 26461af8c8c5..db15acce7771 100644 --- a/services/datasync/src/main/resources/codegen-resources/service-2.json +++ b/services/datasync/src/main/resources/codegen-resources/service-2.json @@ -57,6 +57,20 @@ ], "documentation":"Activates an DataSync agent that you've deployed in your storage environment. The activation process associates the agent with your Amazon Web Services account.
If you haven't deployed an agent yet, see the following topics to learn more:
If you're transferring between Amazon Web Services storage services, you don't need a DataSync agent.
Creates an endpoint for a Microsoft Azure Blob Storage container that DataSync can use as a transfer source or destination.
Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also need a DataSync agent that can connect to your container.
" + }, "CreateLocationEfs":{ "name":"CreateLocationEfs", "http":{ @@ -153,7 +167,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Defines a file system on a Network File System (NFS) server that can be read from or written to.
" + "documentation":"Creates an endpoint for an Network File System (NFS) file server that DataSync can use for a data transfer.
" }, "CreateLocationObjectStorage":{ "name":"CreateLocationObjectStorage", @@ -195,7 +209,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Creates an endpoint for a Server Message Block (SMB) file server that DataSync can access for a transfer. For more information, see Creating an SMB location.
" + "documentation":"Creates an endpoint for a Server Message Block (SMB) file server that DataSync can use for a data transfer.
Before you begin, make sure that you understand how DataSync accesses an SMB file server.
" }, "CreateTask":{ "name":"CreateTask", @@ -282,6 +296,20 @@ "documentation":"Returns information about a DataSync discovery job.
", "endpoint":{"hostPrefix":"discovery-"} }, + "DescribeLocationAzureBlob":{ + "name":"DescribeLocationAzureBlob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLocationAzureBlobRequest"}, + "output":{"shape":"DescribeLocationAzureBlobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"Provides details about how an DataSync transfer location for Microsoft Azure Blob Storage is configured.
" + }, "DescribeLocationEfs":{ "name":"DescribeLocationEfs", "http":{ @@ -479,7 +507,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Returns metadata about a task.
" + "documentation":"Provides information about an DataSync transfer task.
" }, "DescribeTaskExecution":{ "name":"DescribeTaskExecution", @@ -493,7 +521,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Returns detailed metadata about a task that is being executed.
" + "documentation":"Provides information about an DataSync transfer task that's running.
" }, "GenerateRecommendations":{ "name":"GenerateRecommendations", @@ -726,6 +754,20 @@ "documentation":"Edits a DataSync discovery job configuration.
", "endpoint":{"hostPrefix":"discovery-"} }, + "UpdateLocationAzureBlob":{ + "name":"UpdateLocationAzureBlob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationAzureBlobRequest"}, + "output":{"shape":"UpdateLocationAzureBlobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.
" + }, "UpdateLocationHdfs":{ "name":"UpdateLocationHdfs", "http":{ @@ -852,7 +894,7 @@ }, "AgentArns":{ "shape":"DiscoveryAgentArnList", - "documentation":"Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface. You can only specify one ARN.
" }, "CloudWatchLogGroupArn":{ "shape":"LogGroupArn", @@ -934,6 +976,50 @@ "BEST_EFFORT" ] }, + "AzureAccessTier":{ + "type":"string", + "enum":[ + "HOT", + "COOL", + "ARCHIVE" + ] + }, + "AzureBlobAuthenticationType":{ + "type":"string", + "enum":["SAS"] + }, + "AzureBlobContainerUrl":{ + "type":"string", + "max":325, + "pattern":"^https:\\/\\/[A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252}\\/[a-z0-9](-?[a-z0-9]){2,62}$" + }, + "AzureBlobSasConfiguration":{ + "type":"structure", + "required":["Token"], + "members":{ + "Token":{ + "shape":"AzureBlobSasToken", + "documentation":"Specifies a SAS token that provides permissions at the Azure storage account, container, or folder level.
The token is part of the SAS URI string that comes after the storage resource URI and a question mark. A token looks something like this:
sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D
The shared access signature (SAS) configuration that allows DataSync to access your Microsoft Azure Blob Storage.
For more information, see SAS tokens for accessing your Azure Blob Storage.
" + }, + "AzureBlobSasToken":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^.+$", + "sensitive":true + }, + "AzureBlobSubdirectory":{ + "type":"string", + "max":1024, + "pattern":"^[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}\\p{C}]*$" + }, + "AzureBlobType":{ + "type":"string", + "enum":["BLOCK"] + }, "BytesPerSecond":{ "type":"long", "min":-1 @@ -999,11 +1085,11 @@ }, "SubnetArns":{ "shape":"PLSubnetArnList", - "documentation":"Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer.
" + "documentation":"Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer. You can only specify one ARN.
" }, "SecurityGroupArns":{ "shape":"PLSecurityGroupArnList", - "documentation":"Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint. You can only specify one ARN.
" } }, "documentation":"CreateAgentRequest
" @@ -1018,6 +1104,57 @@ }, "documentation":"CreateAgentResponse
" }, + "CreateLocationAzureBlobRequest":{ + "type":"structure", + "required":[ + "ContainerUrl", + "AuthenticationType", + "AgentArns" + ], + "members":{ + "ContainerUrl":{ + "shape":"AzureBlobContainerUrl", + "documentation":"Specifies the URL of the Azure Blob Storage container involved in your transfer.
" + }, + "AuthenticationType":{ + "shape":"AzureBlobAuthenticationType", + "documentation":"Specifies the authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).
" + }, + "SasConfiguration":{ + "shape":"AzureBlobSasConfiguration", + "documentation":"Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.
" + }, + "BlobType":{ + "shape":"AzureBlobType", + "documentation":"Specifies the type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.
" + }, + "AccessTier":{ + "shape":"AzureAccessTier", + "documentation":"Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.
" + }, + "Subdirectory":{ + "shape":"AzureBlobSubdirectory", + "documentation":"Specifies path segments if you want to limit your transfer to a virtual directory in your container (for example, /my/images).
Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.
You can specify more than one agent. For more information, see Using multiple agents for your transfer.
" + }, + "Tags":{ + "shape":"InputTagList", + "documentation":"Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your transfer location.
" + } + } + }, + "CreateLocationAzureBlobResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"The ARN of the Azure Blob Storage transfer location that you created.
" + } + } + }, "CreateLocationEfsRequest":{ "type":"structure", "required":[ @@ -1304,23 +1441,23 @@ "members":{ "Subdirectory":{ "shape":"NfsSubdirectory", - "documentation":"The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.
" + "documentation":"Specifies the subdirectory in the NFS file server that DataSync transfers to or from. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
" }, "ServerHostname":{ "shape":"ServerHostname", - "documentation":"The name of the NFS server. This value is the IP address or Domain Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.
Specifies the IP address or domain name of your NFS file server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
You must specify be an IP version 4 address or Domain Name System (DNS)-compliant name.
Contains a list of Amazon Resource Names (ARNs) of agents that are used to connect to an NFS server.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
" + "documentation":"Specifies the Amazon Resource Names (ARNs) of agents that DataSync uses to connect to your NFS file server.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
" }, "MountOptions":{ "shape":"NfsMountOptions", - "documentation":"The NFS mount options that DataSync can use to mount your NFS share.
" + "documentation":"Specifies the mount options that DataSync can use to mount your NFS share.
" }, "Tags":{ "shape":"InputTagList", - "documentation":"The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.
" + "documentation":"Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.
" } }, "documentation":"CreateLocationNfsRequest
" @@ -1330,7 +1467,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"The Amazon Resource Name (ARN) of the source NFS file system location that is created.
" + "documentation":"The ARN of the transfer location that you created for your NFS file server.
" } }, "documentation":"CreateLocationNfsResponse
" @@ -1698,6 +1835,49 @@ } } }, + "DescribeLocationAzureBlobRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"Specifies the Amazon Resource Name (ARN) of your Azure Blob Storage transfer location.
" + } + } + }, + "DescribeLocationAzureBlobResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"The ARN of your Azure Blob Storage transfer location.
" + }, + "LocationUri":{ + "shape":"LocationUri", + "documentation":"The URL of the Azure Blob Storage container involved in your transfer.
" + }, + "AuthenticationType":{ + "shape":"AzureBlobAuthenticationType", + "documentation":"The authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).
" + }, + "BlobType":{ + "shape":"AzureBlobType", + "documentation":"The type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.
" + }, + "AccessTier":{ + "shape":"AzureAccessTier", + "documentation":"The access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.
" + }, + "AgentArns":{ + "shape":"AgentArnList", + "documentation":"The ARNs of the DataSync agents that can connect with your Azure Blob Storage container.
" + }, + "CreationTime":{ + "shape":"Time", + "documentation":"The time that your Azure Blob Storage transfer location was created.
" + } + } + }, "DescribeLocationEfsRequest":{ "type":"structure", "required":["LocationArn"], @@ -1973,7 +2153,7 @@ "OnPremConfig":{"shape":"OnPremConfig"}, "MountOptions":{ "shape":"NfsMountOptions", - "documentation":"The NFS mount options that DataSync used to mount your NFS share.
" + "documentation":"The mount options that DataSync uses to mount your NFS share.
" }, "CreationTime":{ "shape":"Time", @@ -2272,7 +2452,7 @@ "members":{ "TaskExecutionArn":{ "shape":"TaskExecutionArn", - "documentation":"The Amazon Resource Name (ARN) of the task that is being executed.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the transfer task that's running.
" } }, "documentation":"DescribeTaskExecutionRequest
" @@ -2338,7 +2518,7 @@ "members":{ "TaskArn":{ "shape":"TaskArn", - "documentation":"The Amazon Resource Name (ARN) of the task to describe.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the transfer task.
" } }, "documentation":"DescribeTaskRequest
" @@ -3725,22 +3905,22 @@ "members":{ "VpcEndpointId":{ "shape":"VpcEndpointId", - "documentation":"The ID of the VPC endpoint that is configured for an agent. An agent that is configured with a VPC endpoint will not be accessible over the public internet.
" + "documentation":"Specifies the ID of the VPC endpoint that your agent connects to.
" }, "PrivateLinkEndpoint":{ "shape":"Endpoint", - "documentation":"The private endpoint that is configured for an agent that has access to IP addresses in a PrivateLink. An agent that is configured with this endpoint will not be accessible over the public internet.
" + "documentation":"Specifies the VPC endpoint provided by Amazon Web Services PrivateLink that your agent connects to.
" }, "SubnetArns":{ "shape":"PLSubnetArnList", - "documentation":"The Amazon Resource Names (ARNs) of the subnets that are configured for an agent activated in a VPC or an agent that has access to a VPC endpoint.
" + "documentation":"Specifies the ARN of the subnet where your VPC endpoint is located. You can only specify one ARN.
" }, "SecurityGroupArns":{ "shape":"PLSecurityGroupArnList", - "documentation":"The Amazon Resource Names (ARNs) of the security groups that are configured for the EC2 resource that hosts an agent activated in a VPC or an agent that has access to a VPC endpoint.
" + "documentation":"Specifies the Amazon Resource Names (ARN) of the security group that provides DataSync access to your VPC endpoint. You can only specify one ARN.
" } }, - "documentation":"The VPC endpoint, subnet, and security group that an agent uses to access IP addresses in a VPC (Virtual Private Cloud).
" + "documentation":"Specifies how your DataSync agent connects to Amazon Web Services using a virtual private cloud (VPC) service endpoint. An agent that uses a VPC endpoint isn't accessible over the public internet.
" }, "PtolemyBoolean":{"type":"boolean"}, "PtolemyPassword":{ @@ -4443,6 +4623,45 @@ "members":{ } }, + "UpdateLocationAzureBlobRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"Specifies the ARN of the Azure Blob Storage transfer location that you're updating.
" + }, + "Subdirectory":{ + "shape":"AzureBlobSubdirectory", + "documentation":"Specifies path segments if you want to limit your transfer to a virtual directory in your container (for example, /my/images).
Specifies the authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).
" + }, + "SasConfiguration":{ + "shape":"AzureBlobSasConfiguration", + "documentation":"Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.
" + }, + "BlobType":{ + "shape":"AzureBlobType", + "documentation":"Specifies the type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.
" + }, + "AccessTier":{ + "shape":"AzureAccessTier", + "documentation":"Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.
" + }, + "AgentArns":{ + "shape":"AgentArnList", + "documentation":"Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.
You can specify more than one agent. For more information, see Using multiple agents for your transfer.
" + } + } + }, + "UpdateLocationAzureBlobResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateLocationHdfsRequest":{ "type":"structure", "required":["LocationArn"], @@ -4512,11 +4731,11 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"The Amazon Resource Name (ARN) of the NFS location to update.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the NFS location that you want to update.
" }, "Subdirectory":{ "shape":"NfsSubdirectory", - "documentation":"The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.
" + "documentation":"Specifies the subdirectory in your NFS file system that DataSync uses to read from or write to during a transfer. The NFS path should be exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
" }, "OnPremConfig":{"shape":"OnPremConfig"}, "MountOptions":{"shape":"NfsMountOptions"} @@ -4620,7 +4839,7 @@ }, "AgentArns":{ "shape":"DiscoveryAgentArnList", - "documentation":"Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads your on-premises storage system.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads your on-premises storage system. You can only specify one ARN.
" }, "Name":{ "shape":"Name", From f0cd16b499e4227717686ba5aba527eac440cee9 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:12:55 +0000 Subject: [PATCH 024/270] Amazon SageMaker Service Update: Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API --- .../feature-AmazonSageMakerService-c8218de.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 8 ++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 .changes/next-release/feature-AmazonSageMakerService-c8218de.json diff --git a/.changes/next-release/feature-AmazonSageMakerService-c8218de.json b/.changes/next-release/feature-AmazonSageMakerService-c8218de.json new file mode 100644 index 000000000000..51f73789ffa0 --- /dev/null +++ b/.changes/next-release/feature-AmazonSageMakerService-c8218de.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API" +} diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index 91ea56331fa2..f9f4442f2469 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -31512,6 +31512,10 @@ }, "TextClassificationJobConfig":{ "type":"structure", + "required":[ + "ContentColumn", + "TargetLabelColumn" + ], "members":{ "CompletionCriteria":{ "shape":"AutoMLJobCompletionCriteria", @@ -31519,11 +31523,11 @@ }, "ContentColumn":{ "shape":"ContentColumn", - "documentation":"The name of the column used to provide the sentences to be classified. It should not be the same as the target column (Required).
" + "documentation":"The name of the column used to provide the sentences to be classified. It should not be the same as the target column.
" }, "TargetLabelColumn":{ "shape":"TargetLabelColumn", - "documentation":"The name of the column used to provide the class labels. It should not be same as the content column (Required).
" + "documentation":"The name of the column used to provide the class labels. It should not be same as the content column.
" } }, "documentation":"Stores the configuration information for the text classification problem of an AutoML job V2.
" From 01742c115d8d5c70a91c804292438c1f0c306b1d Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:13:01 +0000 Subject: [PATCH 025/270] Amazon Connect Wisdom Service Update: This release added two new data types: AssistantIntegrationConfiguration, and SessionIntegrationConfiguration to support Wisdom integration with Amazon Connect Chat --- ...re-AmazonConnectWisdomService-e5cde13.json | 6 + .../codegen-resources/endpoint-tests.json | 148 ++++++++++++------ .../codegen-resources/service-2.json | 32 ++++ 3 files changed, 137 insertions(+), 49 deletions(-) create mode 100644 .changes/next-release/feature-AmazonConnectWisdomService-e5cde13.json diff --git a/.changes/next-release/feature-AmazonConnectWisdomService-e5cde13.json b/.changes/next-release/feature-AmazonConnectWisdomService-e5cde13.json new file mode 100644 index 000000000000..c3ac1cc1c4d6 --- /dev/null +++ b/.changes/next-release/feature-AmazonConnectWisdomService-e5cde13.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Connect Wisdom Service", + "contributor": "", + "description": "This release added two new data types: AssistantIntegrationConfiguration, and SessionIntegrationConfiguration to support Wisdom integration with Amazon Connect Chat" +} diff --git a/services/wisdom/src/main/resources/codegen-resources/endpoint-tests.json b/services/wisdom/src/main/resources/codegen-resources/endpoint-tests.json index 992d9927332d..d3307f81d160 100644 --- a/services/wisdom/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/wisdom/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - 
"UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": 
"us-gov-east-1" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -216,9 +216,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -229,9 +240,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -242,9 +264,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -255,9 +288,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": 
"us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -268,9 +312,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -281,9 +325,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -295,8 +339,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -306,9 +350,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -318,11 +362,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/wisdom/src/main/resources/codegen-resources/service-2.json b/services/wisdom/src/main/resources/codegen-resources/service-2.json index c4c4b9ccfa60..1f3cd3e59dd0 100644 --- a/services/wisdom/src/main/resources/codegen-resources/service-2.json +++ b/services/wisdom/src/main/resources/codegen-resources/service-2.json @@ -686,6 +686,10 @@ "shape":"Description", "documentation":"The description.
" }, + "integrationConfiguration":{ + "shape":"AssistantIntegrationConfiguration", + "documentation":"The configuration information for the Wisdom assistant integration.
" + }, "name":{ "shape":"Name", "documentation":"The name.
" @@ -709,6 +713,16 @@ }, "documentation":"The assistant data.
" }, + "AssistantIntegrationConfiguration":{ + "type":"structure", + "members":{ + "topicIntegrationArn":{ + "shape":"GenericArn", + "documentation":"The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for streaming chat messages.
" + } + }, + "documentation":"The configuration information for the Wisdom assistant integration.
" + }, "AssistantList":{ "type":"list", "member":{"shape":"AssistantSummary"} @@ -746,6 +760,10 @@ "shape":"Description", "documentation":"The description of the assistant.
" }, + "integrationConfiguration":{ + "shape":"AssistantIntegrationConfiguration", + "documentation":"The configuration information for the Wisdom assistant integration.
" + }, "name":{ "shape":"Name", "documentation":"The name of the assistant.
" @@ -2400,6 +2418,10 @@ "shape":"Description", "documentation":"The description of the session.
" }, + "integrationConfiguration":{ + "shape":"SessionIntegrationConfiguration", + "documentation":"The configuration information for the session integration.
" + }, "name":{ "shape":"Name", "documentation":"The name of the session.
" @@ -2419,6 +2441,16 @@ }, "documentation":"Information about the session.
" }, + "SessionIntegrationConfiguration":{ + "type":"structure", + "members":{ + "topicIntegrationArn":{ + "shape":"GenericArn", + "documentation":"The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for streaming chat messages.
" + } + }, + "documentation":"The configuration information for the session integration.
" + }, "SessionSummaries":{ "type":"list", "member":{"shape":"SessionSummary"} From e8fc7e151c768c73b97c52bc431357f2c087d47d Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:12:58 +0000 Subject: [PATCH 026/270] AWS Transfer Family Update: This release adds support for SFTP Connectors. --- .../feature-AWSTransferFamily-9428e69.json | 6 + .../codegen-resources/service-2.json | 134 +++++++++++++++--- 2 files changed, 120 insertions(+), 20 deletions(-) create mode 100644 .changes/next-release/feature-AWSTransferFamily-9428e69.json diff --git a/.changes/next-release/feature-AWSTransferFamily-9428e69.json b/.changes/next-release/feature-AWSTransferFamily-9428e69.json new file mode 100644 index 000000000000..a7cf474604dd --- /dev/null +++ b/.changes/next-release/feature-AWSTransferFamily-9428e69.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "This release adds support for SFTP Connectors." +} diff --git a/services/transfer/src/main/resources/codegen-resources/service-2.json b/services/transfer/src/main/resources/codegen-resources/service-2.json index 6e21589ddef8..916b886bef1f 100644 --- a/services/transfer/src/main/resources/codegen-resources/service-2.json +++ b/services/transfer/src/main/resources/codegen-resources/service-2.json @@ -65,7 +65,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"Creates the connector, which captures the parameters for an outbound connection for the AS2 protocol. The connector is required for sending files to an externally hosted AS2 server. For more details about connectors, see Create AS2 connectors.
" + "documentation":"Creates the connector, which captures the parameters for an outbound connection for the AS2 or SFTP protocol. The connector is required for sending files to an externally hosted AS2 or SFTP server. For more details about AS2 connectors, see Create AS2 connectors.
You must specify exactly one configuration object: either for AS2 (As2Config) or SFTP (SftpConfig).
Deletes the agreement that's specified in the provided ConnectorId.
Deletes the connector that's specified in the provided ConnectorId.
Begins an outbound file transfer to a remote AS2 server. You specify the ConnectorId and the file paths for where to send the files.
Begins a file transfer between local Amazon Web Services storage and a remote AS2 or SFTP server.
For an AS2 connector, you specify the ConnectorId and one or more SendFilePaths to identify the files you want to transfer.
For an SFTP connector, the file transfer can be either outbound or inbound. In both cases, you specify the ConnectorId. Depending on the direction of the transfer, you also specify the following items:
If you are transferring file from a partner's SFTP server to a Transfer Family server, you specify one or more RetreiveFilePaths to identify the files you want to transfer, and a LocalDirectoryPath to specify the destination folder.
If you are transferring file to a partner's SFTP server from Amazon Web Services storage, you specify one or more SendFilePaths to identify the files you want to transfer, and a RemoteDirectoryPath to specify the destination folder.
Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.
There is no response returned from this call.
" }, + "TestConnection":{ + "name":"TestConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestConnectionRequest"}, + "output":{"shape":"TestConnectionResponse"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServiceError"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Tests whether your SFTP connector is set up successfully. We highly recommend that you call this operation to test your ability to transfer files between a Transfer Family server and a trading partner's SFTP server.
" + }, "TestIdentityProvider":{ "name":"TestIdentityProvider", "http":{ @@ -1050,7 +1066,7 @@ "documentation":"Provides Basic authentication support to the AS2 Connectors API. To use Basic authentication, you must provide the name or Amazon Resource Name (ARN) of a secret in Secrets Manager.
The default value for this parameter is null, which indicates that Basic authentication is not enabled for the connector.
If the connector should use Basic authentication, the secret needs to be in the following format:
{ \"Username\": \"user-name\", \"Password\": \"user-password\" }
Replace user-name and user-password with the credentials for the actual user that is being authenticated.
Note the following:
You are storing these credentials in Secrets Manager, not passing them directly into this API.
If you are using the API, SDKs, or CloudFormation to configure your connector, then you must create the secret before you can enable Basic authentication. However, if you are using the Amazon Web Services management console, you can have the system create the secret for you.
If you have previously enabled Basic authentication for a connector, you can disable it by using the UpdateConnector API call. For example, if you are using the CLI, you can run the following command to remove Basic authentication:
update-connector --connector-id my-connector-id --as2-config 'BasicAuthSecretId=\"\"'
Contains the details for a connector object. The connector object is used for AS2 outbound processes, to connect the Transfer Family customer with the trading partner.
" + "documentation":"Contains the details for an AS2 connector object. The connector object is used for AS2 outbound processes, to connect the Transfer Family customer with the trading partner.
" }, "As2ConnectorSecretId":{ "type":"string", @@ -1294,17 +1310,16 @@ "type":"structure", "required":[ "Url", - "As2Config", "AccessRole" ], "members":{ "Url":{ "shape":"Url", - "documentation":"The URL of the partner's AS2 endpoint.
" + "documentation":"The URL of the partner's AS2 or SFTP endpoint.
" }, "As2Config":{ "shape":"As2ConnectorConfig", - "documentation":"A structure that contains the parameters for a connector object.
" + "documentation":"A structure that contains the parameters for an AS2 connector object.
" }, "AccessRole":{ "shape":"Role", @@ -1317,6 +1332,10 @@ "Tags":{ "shape":"Tags", "documentation":"Key-value pairs that can be used to group and search for connectors. Tags are metadata attached to connectors for any purpose.
" + }, + "SftpConfig":{ + "shape":"SftpConnectorConfig", + "documentation":"A structure that contains the parameters for an SFTP connector object.
" } } }, @@ -1462,7 +1481,7 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.
The following is an Entry and Target pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.
The following is an Entry and Target pair example for chroot.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.
The following is an Entry and Target pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the value the user should see for their home directory when they log in.
The following is an Entry and Target pair example for chroot.
[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
The URL of the partner's AS2 endpoint.
" + "documentation":"The URL of the partner's AS2 or SFTP endpoint.
" }, "As2Config":{ "shape":"As2ConnectorConfig", - "documentation":"A structure that contains the parameters for a connector object.
" + "documentation":"A structure that contains the parameters for an AS2 connector object.
" }, "AccessRole":{ "shape":"Role", @@ -2215,6 +2234,10 @@ "Tags":{ "shape":"Tags", "documentation":"Key-value pairs that can be used to group and search for connectors.
" + }, + "SftpConfig":{ + "shape":"SftpConnectorConfig", + "documentation":"A structure that contains the parameters for an SFTP connector object.
" } }, "documentation":"Describes the parameters for the connector, as identified by the ConnectorId.
The URL of the partner's AS2 endpoint.
" + "documentation":"The URL of the partner's AS2 or SFTP endpoint.
" } }, "documentation":"Returns details of the connector that is specified.
" @@ -4035,6 +4058,11 @@ "max":16, "min":0 }, + "SecretId":{ + "type":"string", + "max":2048, + "min":1 + }, "SecurityGroupId":{ "type":"string", "max":20, @@ -4144,6 +4172,31 @@ "PUBLIC_KEY_AND_PASSWORD" ] }, + "SftpConnectorConfig":{ + "type":"structure", + "members":{ + "UserSecretId":{ + "shape":"SecretId", + "documentation":"The identifiers for the secrets (in Amazon Web Services Secrets Manager) that contain the SFTP user's private keys or passwords.
" + }, + "TrustedHostKeys":{ + "shape":"SftpConnectorTrustedHostKeyList", + "documentation":"The public portion of the host key, or keys, that are used to authenticate the user to the external server to which you are connecting. You can use the ssh-keyscan command against the SFTP server to retrieve the necessary key.
The three standard SSH public key format elements are <key type>, <body base64>, and an optional <comment>, with spaces between each element.
For the trusted host key, Transfer Family accepts RSA and ECDSA keys.
For RSA keys, the key type is ssh-rsa.
For ECDSA keys, the key type is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the size of the key you generated.
Contains the details for an SFTP connector object. The connector object is used for transferring files to and from a partner's SFTP server.
" + }, + "SftpConnectorTrustedHostKey":{ + "type":"string", + "max":2048, + "min":1 + }, + "SftpConnectorTrustedHostKeyList":{ + "type":"list", + "member":{"shape":"SftpConnectorTrustedHostKey"}, + "max":10, + "min":1 + }, "SigningAlg":{ "type":"string", "enum":[ @@ -4205,18 +4258,27 @@ }, "StartFileTransferRequest":{ "type":"structure", - "required":[ - "ConnectorId", - "SendFilePaths" - ], + "required":["ConnectorId"], "members":{ "ConnectorId":{ "shape":"ConnectorId", - "documentation":"The unique identifier for the connector.
" + "documentation":"The unique identifier for the connector.
" }, "SendFilePaths":{ "shape":"FilePaths", - "documentation":"An array of strings. Each string represents the absolute path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt .
One or more source paths for the Transfer Family server. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt .
One or more source paths for the partner's SFTP server. Each string represents a source file path for one inbound file transfer.
" + }, + "LocalDirectoryPath":{ + "shape":"FilePath", + "documentation":"For an inbound transfer, the LocaDirectoryPath specifies the destination for one or more files that are transferred from the partner's SFTP server.
For an outbound transfer, the RemoteDirectoryPath specifies the destination for one or more files that are transferred to the partner's SFTP server. If you don't specify a RemoteDirectoryPath, the destination for transferred files is the SFTP user's home directory.
Returns the unique identifier for this file transfer.
" + "documentation":"Returns the unique identifier for the file transfer.
" } } }, @@ -4252,6 +4314,7 @@ "STOP_FAILED" ] }, + "Status":{"type":"string"}, "StatusCode":{"type":"integer"}, "StepResultOutputsJson":{ "type":"string", @@ -4351,6 +4414,33 @@ "max":50, "min":1 }, + "TestConnectionRequest":{ + "type":"structure", + "required":["ConnectorId"], + "members":{ + "ConnectorId":{ + "shape":"ConnectorId", + "documentation":"The unique identifier for the connector.
" + } + } + }, + "TestConnectionResponse":{ + "type":"structure", + "members":{ + "ConnectorId":{ + "shape":"ConnectorId", + "documentation":"Returns the identifier of the connector object that you are testing.
" + }, + "Status":{ + "shape":"Status", + "documentation":"Returns OK for successful test, or ERROR if the test fails.
Returns Connection succeeded if the test is successful. Or, returns a descriptive error message if the test fails. The following list provides the details for some error messages and troubleshooting steps for each.
Unable to access secrets manager: Verify that your secret name aligns with the one in Transfer Role permissions.
Unknown Host/Connection failed: Verify the server URL in the connector configuration , and verify that the login credentials work successfully outside of the connector.
Private key not found: Verify that the secret exists and is formatted correctly.
Invalid trusted host keys: Verify that the trusted host key in the connector configuration matches the ssh-keyscan output.
The URL of the partner's AS2 endpoint.
" + "documentation":"The URL of the partner's AS2 or SFTP endpoint.
" }, "As2Config":{ "shape":"As2ConnectorConfig", - "documentation":"A structure that contains the parameters for a connector object.
" + "documentation":"A structure that contains the parameters for an AS2 connector object.
" }, "AccessRole":{ "shape":"Role", @@ -4605,6 +4695,10 @@ "LoggingRole":{ "shape":"Role", "documentation":"The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs.
" + }, + "SftpConfig":{ + "shape":"SftpConnectorConfig", + "documentation":"A structure that contains the parameters for an SFTP connector object.
" } } }, From b70a1a77d07fb1eab6973607ea63fe2b7b329e52 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:13:03 +0000 Subject: [PATCH 027/270] AWSBillingConductor Update: Added support for Auto-Assocate Billing Groups for CreateBillingGroup, UpdateBillingGroup, and ListBillingGroups. --- .../feature-AWSBillingConductor-409d2bd.json | 6 + .../codegen-resources/endpoint-rule-set.json | 237 ++---------------- .../codegen-resources/endpoint-tests.json | 126 +++++++--- .../codegen-resources/service-2.json | 236 ++++++++++------- 4 files changed, 256 insertions(+), 349 deletions(-) create mode 100644 .changes/next-release/feature-AWSBillingConductor-409d2bd.json diff --git a/.changes/next-release/feature-AWSBillingConductor-409d2bd.json b/.changes/next-release/feature-AWSBillingConductor-409d2bd.json new file mode 100644 index 000000000000..37209b57c473 --- /dev/null +++ b/.changes/next-release/feature-AWSBillingConductor-409d2bd.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWSBillingConductor", + "contributor": "", + "description": "Added support for Auto-Assocate Billing Groups for CreateBillingGroup, UpdateBillingGroup, and ListBillingGroups." 
+} diff --git a/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json index b4ea1f3843c2..a71edcaf080e 100644 --- a/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -138,208 +138,40 @@ }, "aws" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor-fips.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - 
"conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://billingconductor.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "name": "sigv4", + "signingName": "billingconductor", + "signingRegion": "us-east-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "billingconductor", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -543,33 +375,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://billingconductor.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "billingconductor", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/services/billingconductor/src/main/resources/codegen-resources/endpoint-tests.json 
b/services/billingconductor/src/main/resources/codegen-resources/endpoint-tests.json index b9c1c6c37988..05fb6f36f466 100644 --- a/services/billingconductor/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/billingconductor/src/main/resources/codegen-resources/endpoint-tests.json @@ -18,8 +18,8 @@ }, "params": { "Region": "aws-global", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -31,8 +31,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -44,8 +44,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -57,8 +57,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -79,8 +79,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -92,8 +92,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -105,8 +105,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -118,8 +118,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -131,8 +131,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -144,8 +144,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -157,8 +157,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -170,8 +170,8 @@ }, "params": { 
"Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -183,8 +183,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -196,8 +207,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -209,8 +231,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -222,8 +255,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -235,8 +279,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": 
false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -248,8 +292,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -261,8 +305,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -273,8 +317,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -285,10 +329,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/billingconductor/src/main/resources/codegen-resources/service-2.json b/services/billingconductor/src/main/resources/codegen-resources/service-2.json index d51720264a14..87d16cf39559 100644 --- a/services/billingconductor/src/main/resources/codegen-resources/service-2.json +++ b/services/billingconductor/src/main/resources/codegen-resources/service-2.json @@ -625,22 +625,22 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"The associating array of account IDs.
" + "documentation":"The associating array of account IDs.
" }, "BillingGroupArn":{ "shape":"BillingGroupArn", - "documentation":"The Billing Group Arn that the linked account is associated to.
" + "documentation":"The Billing Group Arn that the linked account is associated to.
" }, "AccountName":{ "shape":"AccountName", - "documentation":"The Amazon Web Services account name.
" + "documentation":"The Amazon Web Services account name.
" }, "AccountEmail":{ "shape":"AccountEmail", - "documentation":"The Amazon Web Services account email.
" + "documentation":"The Amazon Web Services account email.
" } }, - "documentation":"A representation of a linked account.
" + "documentation":"A representation of a linked account.
" }, "AccountEmail":{ "type":"string", @@ -652,10 +652,14 @@ "members":{ "LinkedAccountIds":{ "shape":"AccountIdList", - "documentation":"The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.
" + "documentation":"The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.
" + }, + "AutoAssociate":{ + "shape":"Boolean", + "documentation":"Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.
" } }, - "documentation":"The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated family.
" + "documentation":"The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.
" }, "AccountId":{ "type":"string", @@ -740,14 +744,14 @@ "members":{ "Message":{ "shape":"String", - "documentation":"The reason why the resource association failed.
" + "documentation":"The reason why the resource association failed.
" }, "Reason":{ "shape":"AssociateResourceErrorReason", - "documentation":"A static error code that's used to classify the type of failure.
" + "documentation":"A static error code that's used to classify the type of failure.
" } }, - "documentation":"A representation of a resource association error.
" + "documentation":"A representation of a resource association error.
" }, "AssociateResourceErrorReason":{ "type":"string", @@ -764,14 +768,14 @@ "members":{ "Arn":{ "shape":"CustomLineItemAssociationElement", - "documentation":"The resource ARN that was associated to the custom line item.
" + "documentation":"The resource ARN that was associated to the custom line item.
" }, "Error":{ "shape":"AssociateResourceError", - "documentation":" An AssociateResourceError that will populate if the resource association fails.
An AssociateResourceError that will populate if the resource association fails.
A resource association result for a percentage custom line item.
" + "documentation":"A resource association result for a percentage custom line item.
" }, "AssociateResourcesResponseList":{ "type":"list", @@ -862,30 +866,30 @@ "members":{ "Arn":{ "shape":"BillingGroupArn", - "documentation":"The Amazon Resource Name (ARN) of a billing group.
" + "documentation":"The Amazon Resource Name (ARN) of a billing group.
" }, "AWSCost":{ "shape":"AWSCost", - "documentation":"The actual Amazon Web Services charges for the billing group.
" + "documentation":"The actual Amazon Web Services charges for the billing group.
" }, "ProformaCost":{ "shape":"ProformaCost", - "documentation":"The hypothetical Amazon Web Services charges based on the associated pricing plan of a billing group.
" + "documentation":"The hypothetical Amazon Web Services charges based on the associated pricing plan of a billing group.
" }, "Margin":{ "shape":"Margin", - "documentation":"The billing group margin.
" + "documentation":"The billing group margin.
" }, "MarginPercentage":{ "shape":"MarginPercentage", - "documentation":"The percentage of billing group margin.
" + "documentation":"The percentage of billing group margin.
" }, "Currency":{ "shape":"Currency", - "documentation":"The displayed currency.
" + "documentation":"The displayed currency.
" } }, - "documentation":"A summary report of actual Amazon Web Services charges and calculated Amazon Web Services charges, based on the associated pricing plan of a billing group.
" + "documentation":"A summary report of actual Amazon Web Services charges and calculated Amazon Web Services charges, based on the associated pricing plan of a billing group.
" }, "BillingGroupCostReportList":{ "type":"list", @@ -910,43 +914,47 @@ "members":{ "Name":{ "shape":"BillingGroupName", - "documentation":"The name of the billing group.
" + "documentation":"The name of the billing group.
" }, "Arn":{ "shape":"BillingGroupArn", - "documentation":"The Amazon Resource Number (ARN) that can be used to uniquely identify the billing group.
" + "documentation":"The Amazon Resource Number (ARN) that can be used to uniquely identify the billing group.
" }, "Description":{ "shape":"BillingGroupDescription", - "documentation":"The description of the billing group.
" + "documentation":"The description of the billing group.
" }, "PrimaryAccountId":{ "shape":"AccountId", - "documentation":"The account ID that serves as the main account in a billing group.
" + "documentation":"The account ID that serves as the main account in a billing group.
" }, "ComputationPreference":{"shape":"ComputationPreference"}, "Size":{ "shape":"NumberOfAccounts", - "documentation":"The number of accounts in the particular billing group.
" + "documentation":"The number of accounts in the particular billing group.
" }, "CreationTime":{ "shape":"Instant", - "documentation":"The time when the billing group was created.
" + "documentation":"The time when the billing group was created.
" }, "LastModifiedTime":{ "shape":"Instant", - "documentation":"The most recent time when the billing group was modified.
" + "documentation":"The most recent time when the billing group was modified.
" }, "Status":{ "shape":"BillingGroupStatus", - "documentation":"The billing group status. Only one of the valid values can be used.
" + "documentation":"The billing group status. Only one of the valid values can be used.
" }, "StatusReason":{ "shape":"BillingGroupStatusReason", - "documentation":"The reason why the billing group is in its current status.
" + "documentation":"The reason why the billing group is in its current status.
" + }, + "AccountGrouping":{ + "shape":"ListBillingGroupAccountGrouping", + "documentation":"Specifies if the billing group has automatic account association (AutoAssociate) enabled.
A representation of a billing group.
" + "documentation":"A representation of a billing group.
" }, "BillingGroupName":{ "type":"string", @@ -973,6 +981,10 @@ "type":"string", "pattern":"\\d{4}-(0?[1-9]|1[012])" }, + "Boolean":{ + "type":"boolean", + "box":true + }, "ClientToken":{ "type":"string", "max":64, @@ -988,7 +1000,7 @@ "documentation":"The Amazon Resource Name (ARN) of the pricing plan that's used to compute the Amazon Web Services charges for a billing group.
" } }, - "documentation":"The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group.
" + "documentation":"The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group.
" }, "ConflictException":{ "type":"structure", @@ -1050,7 +1062,7 @@ }, "AccountGrouping":{ "shape":"AccountGrouping", - "documentation":"The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated family.
" + "documentation":"The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.
" }, "ComputationPreference":{ "shape":"ComputationPreference", @@ -1306,14 +1318,14 @@ "members":{ "InclusiveStartBillingPeriod":{ "shape":"BillingPeriod", - "documentation":"The inclusive start billing period that defines a billing period range where a custom line is applied.
" + "documentation":"The inclusive start billing period that defines a billing period range where a custom line is applied.
" }, "ExclusiveEndBillingPeriod":{ "shape":"BillingPeriod", - "documentation":"The inclusive end billing period that defines a billing period range where a custom line is applied.
" + "documentation":"The inclusive end billing period that defines a billing period range where a custom line is applied.
" } }, - "documentation":"The billing period range in which the custom line item request will be applied.
" + "documentation":"The billing period range in which the custom line item request will be applied.
" }, "CustomLineItemChargeDetails":{ "type":"structure", @@ -1321,18 +1333,18 @@ "members":{ "Flat":{ "shape":"CustomLineItemFlatChargeDetails", - "documentation":" A CustomLineItemFlatChargeDetails that describes the charge details of a flat custom line item.
A CustomLineItemFlatChargeDetails that describes the charge details of a flat custom line item.
A CustomLineItemPercentageChargeDetails that describes the charge details of a percentage custom line item.
A CustomLineItemPercentageChargeDetails that describes the charge details of a percentage custom line item.
The type of the custom line item that indicates whether the charge is a fee or credit.
" + "documentation":"The type of the custom line item that indicates whether the charge is a fee or credit.
" } }, - "documentation":" The charge details of a custom line item. It should contain only one of Flat or Percentage.
The charge details of a custom line item. It should contain only one of Flat or Percentage.
The custom line item's fixed charge value in USD.
" + "documentation":"The custom line item's fixed charge value in USD.
" } }, - "documentation":"A representation of the charge details that are associated with a flat custom line item.
" + "documentation":"A representation of the charge details that are associated with a flat custom line item.
" }, "CustomLineItemList":{ "type":"list", @@ -1366,46 +1378,46 @@ "members":{ "Arn":{ "shape":"CustomLineItemArn", - "documentation":"The Amazon Resource Names (ARNs) for custom line items.
" + "documentation":"The Amazon Resource Names (ARNs) for custom line items.
" }, "Name":{ "shape":"CustomLineItemName", - "documentation":"The custom line item's name.
" + "documentation":"The custom line item's name.
" }, "ChargeDetails":{ "shape":"ListCustomLineItemChargeDetails", - "documentation":" A ListCustomLineItemChargeDetails that describes the charge details of a custom line item.
A ListCustomLineItemChargeDetails that describes the charge details of a custom line item.
The custom line item's charge value currency. Only one of the valid values can be used.
" + "documentation":"The custom line item's charge value currency. Only one of the valid values can be used.
" }, "Description":{ "shape":"CustomLineItemDescription", - "documentation":"The custom line item's description. This is shown on the Bills page in association with the charge value.
" + "documentation":"The custom line item's description. This is shown on the Bills page in association with the charge value.
" }, "ProductCode":{ "shape":"CustomLineItemProductCode", - "documentation":"The product code that's associated with the custom line item.
" + "documentation":"The product code that's associated with the custom line item.
" }, "BillingGroupArn":{ "shape":"BillingGroupArn", - "documentation":"The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.
" + "documentation":"The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.
" }, "CreationTime":{ "shape":"Instant", - "documentation":"The time created.
" + "documentation":"The time created.
" }, "LastModifiedTime":{ "shape":"Instant", - "documentation":"The most recent time when the custom line item was modified.
" + "documentation":"The most recent time when the custom line item was modified.
" }, "AssociationSize":{ "shape":"NumberOfAssociations", - "documentation":"The number of resources that are associated to the custom line item.
" + "documentation":"The number of resources that are associated to the custom line item.
" } }, - "documentation":"A representation of a custom line item.
" + "documentation":"A representation of a custom line item.
" }, "CustomLineItemName":{ "type":"string", @@ -1426,14 +1438,14 @@ "members":{ "PercentageValue":{ "shape":"CustomLineItemPercentageChargeValue", - "documentation":"The custom line item's percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.
" + "documentation":"The custom line item's percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.
" }, "AssociatedValues":{ "shape":"CustomLineItemAssociationsList", - "documentation":"A list of resource ARNs to associate to the percentage custom line item.
" + "documentation":"A list of resource ARNs to associate to the percentage custom line item.
" } }, - "documentation":"A representation of the charge details that are associated with a percentage custom line item.
" + "documentation":"A representation of the charge details that are associated with a percentage custom line item.
" }, "CustomLineItemPercentageChargeValue":{ "type":"double", @@ -1653,14 +1665,14 @@ "members":{ "Arn":{ "shape":"CustomLineItemAssociationElement", - "documentation":"The resource ARN that was disassociated from the custom line item.
" + "documentation":"The resource ARN that was disassociated from the custom line item.
" }, "Error":{ "shape":"AssociateResourceError", "documentation":" An AssociateResourceError that's shown if the resource disassociation fails.
A resource disassociation result for a percentage custom line item.
" + "documentation":"A resource disassociation result for a percentage custom line item.
" }, "DisassociateResourcesResponseList":{ "type":"list", @@ -1700,18 +1712,18 @@ "members":{ "Association":{ "shape":"Association", - "documentation":" MONITORED: linked accounts that are associated to billing groups.
UNMONITORED: linked accounts that are not associated to billing groups.
Billing Group Arn: linked accounts that are associated to the provided Billing Group Arn.
MONITORED: linked accounts that are associated to billing groups.
UNMONITORED: linked accounts that are not associated to billing groups.
Billing Group Arn: linked accounts that are associated to the provided Billing Group Arn.
The Amazon Web Services account ID to filter on.
" + "documentation":"The Amazon Web Services account ID to filter on.
" }, "AccountIds":{ "shape":"AccountIdFilterList", "documentation":"The list of Amazon Web Services IDs to retrieve their associated billing group for a given time range.
" } }, - "documentation":"The filter on the account ID of the linked account, or any of the following:
MONITORED: linked accounts that are associated to billing groups.
UNMONITORED: linked accounts that are not associated to billing groups.
Billing Group Arn: linked accounts that are associated to the provided Billing Group Arn.
The filter on the account ID of the linked account, or any of the following:
MONITORED: linked accounts that are associated to billing groups.
UNMONITORED: linked accounts that are not associated to billing groups.
Billing Group Arn: linked accounts that are associated to the provided Billing Group Arn.
Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.
" + } + }, + "documentation":"Specifies if the billing group has the following features enabled.
" + }, "ListBillingGroupCostReportsFilter":{ "type":"structure", "members":{ @@ -1751,7 +1773,7 @@ "documentation":"The list of Amazon Resource Names (ARNs) used to filter billing groups to retrieve reports.
" } }, - "documentation":"The filter used to retrieve specific BillingGroupCostReportElements.
The filter used to retrieve specific BillingGroupCostReportElements.
The list of billing group Amazon Resource Names (ARNs) to retrieve information.
" + "documentation":"The list of billing group Amazon Resource Names (ARNs) to retrieve information.
" }, "PricingPlan":{ "shape":"PricingPlanFullArn", - "documentation":"The pricing plan Amazon Resource Names (ARNs) to retrieve information.
" + "documentation":"The pricing plan Amazon Resource Names (ARNs) to retrieve information.
" }, "Statuses":{ "shape":"BillingGroupStatusList", "documentation":"A list of billing groups to retrieve their current status for a specific time range
" + }, + "AutoAssociate":{ + "shape":"Boolean", + "documentation":"Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.
" } }, - "documentation":"The filter that specifies the billing groups and pricing plans to retrieve billing group information.
" + "documentation":"The filter that specifies the billing groups and pricing plans to retrieve billing group information.
" }, "ListBillingGroupsInput":{ "type":"structure", @@ -1944,18 +1970,18 @@ "members":{ "Names":{ "shape":"CustomLineItemNameList", - "documentation":"A list of custom line items to retrieve information.
" + "documentation":"A list of custom line items to retrieve information.
" }, "BillingGroups":{ "shape":"BillingGroupArnList", - "documentation":"The billing group Amazon Resource Names (ARNs) to retrieve information.
" + "documentation":"The billing group Amazon Resource Names (ARNs) to retrieve information.
" }, "Arns":{ "shape":"CustomLineItemArns", - "documentation":"A list of custom line item ARNs to retrieve information.
" + "documentation":"A list of custom line item ARNs to retrieve information.
" } }, - "documentation":"A filter that specifies the custom line items and billing groups to retrieve FFLI information.
" + "documentation":"A filter that specifies the custom line items and billing groups to retrieve FFLI information.
" }, "ListCustomLineItemsInput":{ "type":"structure", @@ -2039,10 +2065,10 @@ "members":{ "Arns":{ "shape":"PricingPlanArns", - "documentation":"A list of pricing plan Amazon Resource Names (ARNs) to retrieve information.
" + "documentation":"A list of pricing plan Amazon Resource Names (ARNs) to retrieve information.
" } }, - "documentation":"The filter that specifies the Amazon Resource Names (ARNs) of pricing plans, to retrieve pricing plan information.
" + "documentation":"The filter that specifies the Amazon Resource Names (ARNs) of pricing plans, to retrieve pricing plan information.
" }, "ListPricingPlansInput":{ "type":"structure", @@ -2130,10 +2156,10 @@ "members":{ "Arns":{ "shape":"PricingRuleArns", - "documentation":"A list containing the pricing rule Amazon Resource Names (ARNs) to include in the API response.
" + "documentation":"A list containing the pricing rule Amazon Resource Names (ARNs) to include in the API response.
" } }, - "documentation":" The filter that specifies criteria that the pricing rules returned by the ListPricingRules API will adhere to.
The filter that specifies criteria that the pricing rules returned by the ListPricingRules API will adhere to.
The name of a pricing plan.
" + "documentation":"The name of a pricing plan.
" }, "Arn":{ "shape":"PricingPlanArn", - "documentation":"The pricing plan Amazon Resource Names (ARN). This can be used to uniquely identify a pricing plan.
" + "documentation":"The pricing plan Amazon Resource Names (ARN). This can be used to uniquely identify a pricing plan.
" }, "Description":{ "shape":"PricingPlanDescription", - "documentation":"The pricing plan description.
" + "documentation":"The pricing plan description.
" }, "Size":{ "shape":"NumberOfAssociatedPricingRules", - "documentation":"The pricing rules count that's currently associated with this pricing plan list element.
" + "documentation":"The pricing rules count that's currently associated with this pricing plan list element.
" }, "CreationTime":{ "shape":"Instant", - "documentation":"The time when the pricing plan was created.
" + "documentation":"The time when the pricing plan was created.
" }, "LastModifiedTime":{ "shape":"Instant", - "documentation":"The most recent time when the pricing plan was modified.
" + "documentation":"The most recent time when the pricing plan was modified.
" } }, - "documentation":"A representation of a pricing plan.
" + "documentation":"A representation of a pricing plan.
" }, "PricingPlanName":{ "type":"string", @@ -2420,43 +2446,43 @@ "members":{ "Name":{ "shape":"PricingRuleName", - "documentation":"The name of a pricing rule.
" + "documentation":"The name of a pricing rule.
" }, "Arn":{ "shape":"PricingRuleArn", - "documentation":"The Amazon Resource Name (ARN) used to uniquely identify a pricing rule.
" + "documentation":"The Amazon Resource Name (ARN) used to uniquely identify a pricing rule.
" }, "Description":{ "shape":"PricingRuleDescription", - "documentation":"The pricing rule description.
" + "documentation":"The pricing rule description.
" }, "Scope":{ "shape":"PricingRuleScope", - "documentation":"The scope of pricing rule that indicates if it is globally applicable, or if it is service-specific.
" + "documentation":"The scope of pricing rule that indicates if it is globally applicable, or if it is service-specific.
" }, "Type":{ "shape":"PricingRuleType", - "documentation":"The type of pricing rule.
" + "documentation":"The type of pricing rule.
" }, "ModifierPercentage":{ "shape":"ModifierPercentage", - "documentation":"A percentage modifier applied on the public pricing rates.
" + "documentation":"A percentage modifier applied on the public pricing rates.
" }, "Service":{ "shape":"Service", - "documentation":" If the Scope attribute is SERVICE, this attribute indicates which service the PricingRule is applicable for.
If the Scope attribute is SERVICE, this attribute indicates which service the PricingRule is applicable for.
The pricing plans count that this pricing rule is associated with.
" + "documentation":"The pricing plans count that this pricing rule is associated with.
" }, "CreationTime":{ "shape":"Instant", - "documentation":"The time when the pricing rule was created.
" + "documentation":"The time when the pricing rule was created.
" }, "LastModifiedTime":{ "shape":"Instant", - "documentation":"The most recent time when the pricing rule was modified.
" + "documentation":"The most recent time when the pricing rule was modified.
" }, "BillingEntity":{ "shape":"BillingEntity", @@ -2475,7 +2501,7 @@ "documentation":"Operation is the specific Amazon Web Services action covered by this line item. This describes the specific usage of the line item.
If the Scope attribute is set to SKU, this attribute indicates which operation the PricingRule is modifying. For example, a value of RunInstances:0202 indicates the operation of running an Amazon EC2 instance.
A representation of a pricing rule.
" + "documentation":"A representation of a pricing rule.
" }, "PricingRuleName":{ "type":"string", @@ -2676,6 +2702,16 @@ "members":{ } }, + "UpdateBillingGroupAccountGrouping":{ + "type":"structure", + "members":{ + "AutoAssociate":{ + "shape":"Boolean", + "documentation":"Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.
" + } + }, + "documentation":"Specifies if the billing group has the following features enabled.
" + }, "UpdateBillingGroupInput":{ "type":"structure", "required":["Arn"], @@ -2699,6 +2735,10 @@ "Description":{ "shape":"BillingGroupDescription", "documentation":"A description of the billing group.
" + }, + "AccountGrouping":{ + "shape":"UpdateBillingGroupAccountGrouping", + "documentation":"Specifies if the billing group has automatic account association (AutoAssociate) enabled.
The reason why the billing group is in its current status.
" + }, + "AccountGrouping":{ + "shape":"UpdateBillingGroupAccountGrouping", + "documentation":"Specifies if the billing group has automatic account association (AutoAssociate) enabled.
The fields that caused the error, if applicable.
" } }, - "documentation":"The input doesn't match with the constraints specified by Amazon Web Services services.
", + "documentation":"The input doesn't match with the constraints specified by Amazon Web Services.
", "error":{ "httpStatusCode":400, "senderFault":true @@ -3023,14 +3067,14 @@ "members":{ "Name":{ "shape":"String", - "documentation":"The field name.
" + "documentation":"The field name.
" }, "Message":{ "shape":"String", - "documentation":"The message describing why the field failed validation.
" + "documentation":"The message describing why the field failed validation.
" } }, - "documentation":"The field's information of a request that resulted in an exception.
" + "documentation":"The field's information of a request that resulted in an exception.
" }, "ValidationExceptionFieldList":{ "type":"list", @@ -3095,7 +3139,9 @@ "ILLEGAL_OPERATION", "ILLEGAL_USAGE_TYPE", "INVALID_SKU_COMBO", - "INVALID_FILTER" + "INVALID_FILTER", + "TOO_MANY_AUTO_ASSOCIATE_BILLING_GROUPS", + "CANNOT_DELETE_AUTO_ASSOCIATE_BILLING_GROUP" ] } }, From 8854de8ec2dd952c21c75b2cedf10073849b2ced Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:13:01 +0000 Subject: [PATCH 028/270] Amazon Elastic Compute Cloud Update: This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes. --- ...ure-AmazonElasticComputeCloud-750fa4b.json | 6 +++ .../codegen-resources/service-2.json | 37 ++++++++++++++++++- 2 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 .changes/next-release/feature-AmazonElasticComputeCloud-750fa4b.json diff --git a/.changes/next-release/feature-AmazonElasticComputeCloud-750fa4b.json b/.changes/next-release/feature-AmazonElasticComputeCloud-750fa4b.json new file mode 100644 index 000000000000..56269bc4d278 --- /dev/null +++ b/.changes/next-release/feature-AmazonElasticComputeCloud-750fa4b.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes." 
+} diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index ce2907650a2c..9793e60d3dac 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -8950,6 +8950,7 @@ ] }, "BareMetalFlag":{"type":"boolean"}, + "BaselineBandwidthInGbps":{"type":"double"}, "BaselineBandwidthInMbps":{"type":"integer"}, "BaselineEbsBandwidthMbps":{ "type":"structure", @@ -32633,6 +32634,11 @@ "shape":"InferenceDeviceInfoList", "documentation":"Describes the Inference accelerators for the instance type.
", "locationName":"accelerators" + }, + "TotalInferenceMemoryInMiB":{ + "shape":"totalInferenceMemory", + "documentation":"The total size of the memory for the inference accelerators for the instance type, in MiB.
", + "locationName":"totalInferenceMemoryInMiB" } }, "documentation":"Describes the Inference accelerators for the instance type.
" @@ -32655,6 +32661,11 @@ "shape":"InferenceDeviceManufacturerName", "documentation":"The manufacturer of the Inference accelerator.
", "locationName":"manufacturer" + }, + "MemoryInfo":{ + "shape":"InferenceDeviceMemoryInfo", + "documentation":"Describes the memory available to the inference accelerator.
", + "locationName":"memoryInfo" } }, "documentation":"Describes the Inference accelerators for the instance type.
" @@ -32665,6 +32676,18 @@ "locationName":"item" }, "InferenceDeviceManufacturerName":{"type":"string"}, + "InferenceDeviceMemoryInfo":{ + "type":"structure", + "members":{ + "SizeInMiB":{ + "shape":"InferenceDeviceMemorySize", + "documentation":"The size of the memory available to the inference accelerator, in MiB.
", + "locationName":"sizeInMiB" + } + }, + "documentation":"Describes the memory available to the inference accelerator.
" + }, + "InferenceDeviceMemorySize":{"type":"integer"}, "InferenceDeviceName":{"type":"string"}, "InsideCidrBlocksStringList":{ "type":"list", @@ -42754,6 +42777,16 @@ "shape":"MaxNetworkInterfaces", "documentation":"The maximum number of network interfaces for the network card.
", "locationName":"maximumNetworkInterfaces" + }, + "BaselineBandwidthInGbps":{ + "shape":"BaselineBandwidthInGbps", + "documentation":"The baseline network performance of the network card, in Gbps.
", + "locationName":"baselineBandwidthInGbps" + }, + "PeakBandwidthInGbps":{ + "shape":"PeakBandwidthInGbps", + "documentation":"The peak (burst) network performance of the network card, in Gbps.
", + "locationName":"peakBandwidthInGbps" } }, "documentation":"Describes the network card support of the instance type.
" @@ -44197,6 +44230,7 @@ }, "documentation":"Describes the data that identifies an Amazon FPGA image (AFI) on the PCI bus.
" }, + "PeakBandwidthInGbps":{"type":"double"}, "PeeringAttachmentStatus":{ "type":"structure", "members":{ @@ -58193,7 +58227,8 @@ } }, "totalFpgaMemory":{"type":"integer"}, - "totalGpuMemory":{"type":"integer"} + "totalGpuMemory":{"type":"integer"}, + "totalInferenceMemory":{"type":"integer"} }, "documentation":"Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the Amazon Web Services Cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically isolated section of the Amazon Web Services Cloud where you can launch Amazon Web Services resources in a virtual network that you've defined. Amazon Elastic Block Store (Amazon EBS) provides block level storage volumes for use with EC2 instances. EBS volumes are highly available and reliable storage volumes that can be attached to any running instance and used like a hard drive.
To learn more, see the following resources:
Amazon EC2: Amazon EC2 product page, Amazon EC2 documentation
Amazon EBS: Amazon EBS product page, Amazon EBS documentation
Amazon VPC: Amazon VPC product page, Amazon VPC documentation
The source identity specified by the principal that is calling the AssumeRole operation.
You can require users to specify a source identity when they assume a role. You do this by using the sts:SourceIdentity condition key in a role trust policy. You can use source identity information in CloudTrail logs to determine who took actions with a role. You can use the aws:SourceIdentity condition key to further control access to Amazon Web Services resources based on the value of source identity. For more information about using source identity, see Monitor and control actions taken with assumed roles in the IAM User Guide.
The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@-. You cannot use a value that begins with the text aws:. This prefix is reserved for Amazon Web Services internal use.
Reserved for future use.
" } } }, @@ -322,7 +326,7 @@ }, "WebIdentityToken":{ "shape":"clientTokenType", - "documentation":"The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call.
The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA algorithms (RS256) are supported.
A reference to the IAM managed policy that is passed as a session policy for a role session or a federated user session.
" }, + "ProvidedContext":{ + "type":"structure", + "members":{ + "ProviderArn":{ + "shape":"arnType", + "documentation":"Reserved for future use.
" + }, + "ContextAssertion":{ + "shape":"contextAssertionType", + "documentation":"Reserved for future use.
" + } + }, + "documentation":"Reserved for future use.
" + }, + "ProvidedContextsListType":{ + "type":"list", + "member":{"shape":"ProvidedContext"}, + "max":5 + }, "RegionDisabledException":{ "type":"structure", "members":{ @@ -745,6 +768,11 @@ "min":4, "sensitive":true }, + "contextAssertionType":{ + "type":"string", + "max":2048, + "min":4 + }, "dateType":{"type":"timestamp"}, "decodedMessageType":{"type":"string"}, "durationSecondsType":{ From 4a521a19b12beaf201a2d80a3d405e5a1da718f3 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:13:44 +0000 Subject: [PATCH 030/270] EMR Serverless Update: This release adds support for publishing application logs to CloudWatch. --- .../feature-EMRServerless-a99eac6.json | 6 ++ .../codegen-resources/service-2.json | 66 ++++++++++++++++++- 2 files changed, 71 insertions(+), 1 deletion(-) create mode 100644 .changes/next-release/feature-EMRServerless-a99eac6.json diff --git a/.changes/next-release/feature-EMRServerless-a99eac6.json b/.changes/next-release/feature-EMRServerless-a99eac6.json new file mode 100644 index 000000000000..847926e5532e --- /dev/null +++ b/.changes/next-release/feature-EMRServerless-a99eac6.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "This release adds support for publishing application logs to CloudWatch." +} diff --git a/services/emrserverless/src/main/resources/codegen-resources/service-2.json b/services/emrserverless/src/main/resources/codegen-resources/service-2.json index 1b77304cc362..a4437ef55a38 100644 --- a/services/emrserverless/src/main/resources/codegen-resources/service-2.json +++ b/services/emrserverless/src/main/resources/codegen-resources/service-2.json @@ -94,7 +94,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"Returns a URL to access the job run dashboard. The generated URL is valid for one hour, after which you must invoke the API again to generate a new URL.
" + "documentation":"Creates and returns a URL that you can use to access the application UIs for a job run.
For jobs in a running state, the application UI is a live user interface such as the Spark or Tez web UI. For completed jobs, the application UI is a persistent application user interface such as the Spark History Server or persistent Tez UI.
The URL is valid for one hour after you generate it. To access the application UI after that hour elapses, you must invoke the API again to generate a new URL.
Enables CloudWatch logging.
" + }, + "logGroupName":{ + "shape":"LogGroupName", + "documentation":"The name of the log group in Amazon CloudWatch Logs where you want to publish your logs.
" + }, + "logStreamNamePrefix":{ + "shape":"LogStreamNamePrefix", + "documentation":"Prefix for the CloudWatch log stream name.
" + }, + "encryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"The Key Management Service (KMS) key ARN to encrypt the logs that you store in CloudWatch Logs.
" + }, + "logTypes":{ + "shape":"LogTypeMap", + "documentation":"The types of logs that you want to publish to CloudWatch. If you don't specify any log types, driver STDOUT and STDERR logs will be published to CloudWatch Logs by default. For more information including the supported worker types for Hive and Spark, see Logging for EMR Serverless with CloudWatch.
Key Valid Values: SPARK_DRIVER, SPARK_EXECUTOR, HIVE_DRIVER, TEZ_TASK
Array Members Valid Values: STDOUT, STDERR, HIVE_LOG, TEZ_AM, SYSTEM_LOGS
The Amazon CloudWatch configuration for monitoring logs. You can configure your jobs to send log information to CloudWatch.
" + }, "Configuration":{ "type":"structure", "required":["classification"], @@ -1263,6 +1290,38 @@ } } }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "LogStreamNamePrefix":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "LogTypeList":{ + "type":"list", + "member":{"shape":"LogTypeString"}, + "max":5, + "min":1 + }, + "LogTypeMap":{ + "type":"map", + "key":{"shape":"WorkerTypeString"}, + "value":{"shape":"LogTypeList"}, + "max":4, + "min":1 + }, + "LogTypeString":{ + "type":"string", + "documentation":"Log type for a Spark/Hive job-run.
", + "max":50, + "min":1, + "pattern":"[a-zA-Z]+[-_]*[a-zA-Z]+" + }, "ManagedPersistenceMonitoringConfiguration":{ "type":"structure", "members":{ @@ -1315,6 +1374,10 @@ "managedPersistenceMonitoringConfiguration":{ "shape":"ManagedPersistenceMonitoringConfiguration", "documentation":"The managed log persistence configuration for a job run.
" + }, + "cloudWatchLoggingConfiguration":{ + "shape":"CloudWatchLoggingConfiguration", + "documentation":"The Amazon CloudWatch configuration for monitoring logs. You can configure your jobs to send log information to CloudWatch.
" } }, "documentation":"The configuration setting for monitoring.
" @@ -1833,6 +1896,7 @@ }, "WorkerTypeString":{ "type":"string", + "documentation":"Worker type for an analytics framework.
", "max":50, "min":1, "pattern":"[a-zA-Z]+[-_]*[a-zA-Z]+" From d57e578deccb82e3da90e8d3cc970b06e5c02e0e Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:13:45 +0000 Subject: [PATCH 031/270] Amazon Connect Customer Profiles Update: Amazon Connect Customer Profiles now supports rule-based resolution to match and merge similar profiles into unified profiles, helping companies deliver faster and more personalized customer service by providing access to relevant customer information for agents and automated experiences. --- ...AmazonConnectCustomerProfiles-8a1e306.json | 6 + .../codegen-resources/service-2.json | 331 +++++++++++++++++- 2 files changed, 335 insertions(+), 2 deletions(-) create mode 100644 .changes/next-release/feature-AmazonConnectCustomerProfiles-8a1e306.json diff --git a/.changes/next-release/feature-AmazonConnectCustomerProfiles-8a1e306.json b/.changes/next-release/feature-AmazonConnectCustomerProfiles-8a1e306.json new file mode 100644 index 000000000000..9a5a7e3d66a1 --- /dev/null +++ b/.changes/next-release/feature-AmazonConnectCustomerProfiles-8a1e306.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "Amazon Connect Customer Profiles now supports rule-based resolution to match and merge similar profiles into unified profiles, helping companies deliver faster and more personalized customer service by providing access to relevant customer information for agents and automated experiences." 
+} diff --git a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json index dcd2477861c9..2faa269c4324 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json @@ -62,7 +62,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.
Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.
Use this API or UpdateDomain to enable identity resolution: set Matching to true.
To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.
" + "documentation":"Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.
Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.
Use this API or UpdateDomain to enable identity resolution: set Matching to true.
To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.
" }, "CreateEventStream":{ "name":"CreateEventStream", @@ -439,6 +439,23 @@ ], "documentation":"Returns the template information for a specific object type.
A template is a predefined ProfileObjectType, such as “Salesforce-Account” or “Salesforce-Contact.” When a user sends a ProfileObject, using the PutProfileObject API, with an ObjectTypeName that matches one of the TemplateIds, it uses the mappings from the template.
" }, + "GetSimilarProfiles":{ + "name":"GetSimilarProfiles", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/matches" + }, + "input":{"shape":"GetSimilarProfilesRequest"}, + "output":{"shape":"GetSimilarProfilesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Returns a set of profiles that belong to the same matching group using the matchId or profileId. You can also specify the type of matching that you want for finding similar profiles using either RULE_BASED_MATCHING or ML_BASED_MATCHING.
Returns a list of objects associated with a profile of a given ProfileObjectType.
" }, + "ListRuleBasedMatches":{ + "name":"ListRuleBasedMatches", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/profiles/ruleBasedMatches" + }, + "input":{"shape":"ListRuleBasedMatchesRequest"}, + "output":{"shape":"ListRuleBasedMatchesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Returns a set of MatchIds that belong to the given domain.
Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.
After a domain is created, the name can’t be changed.
Use this API or CreateDomain to enable identity resolution: set Matching to true.
To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.
To add or remove tags on an existing Domain, see TagResource/UntagResource.
" + "documentation":"Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.
After a domain is created, the name can’t be changed.
Use this API or CreateDomain to enable identity resolution: set Matching to true.
To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.
To add or remove tags on an existing Domain, see TagResource/UntagResource.
" }, "UpdateProfile":{ "name":"UpdateProfile", @@ -963,6 +997,12 @@ }, "documentation":"A generic address associated with the customer that is not mailing, shipping, or billing.
" }, + "AddressList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":4, + "min":1 + }, "AppflowIntegration":{ "type":"structure", "required":["FlowDefinition"], @@ -1103,11 +1143,41 @@ "max":2, "min":1 }, + "AttributeMatchingModel":{ + "type":"string", + "enum":[ + "ONE_TO_ONE", + "MANY_TO_MANY" + ] + }, "AttributeSourceIdMap":{ "type":"map", "key":{"shape":"string1To255"}, "value":{"shape":"uuid"} }, + "AttributeTypesSelector":{ + "type":"structure", + "required":["AttributeMatchingModel"], + "members":{ + "AttributeMatchingModel":{ + "shape":"AttributeMatchingModel", + "documentation":"Configures the AttributeMatchingModel, you can either choose ONE_TO_ONE or MANY_TO_MANY.
The Address type. You can choose from Address, BusinessAddress, MaillingAddress, and ShippingAddress.
You only can use the Address type in the MatchingRule. For example, if you want to match profile based on BusinessAddress.City or MaillingAddress.City, you need to choose the BusinessAddress and the MaillingAddress to represent the Address type and specify the Address.City on the matching rule.
The PhoneNumber type. You can choose from PhoneNumber, HomePhoneNumber, and MobilePhoneNumber.
You only can use the PhoneNumber type in the MatchingRule. For example, if you want to match a profile based on Phone or HomePhone, you need to choose the Phone and the HomePhone to represent the PhoneNumber type and only specify the PhoneNumber on the matching rule.
The Email type. You can choose from EmailAddress, BusinessEmailAddress and PersonalEmailAddress.
You only can use the EmailAddress type in the MatchingRule. For example, if you want to match profile based on PersonalEmailAddress or BusinessEmailAddress, you need to choose the PersonalEmailAddress and the BusinessEmailAddress to represent the EmailAddress type and only specify the EmailAddress on the matching rule.
Configuration information about the AttributeTypesSelector where the rule-based identity resolution uses to match profiles. You can choose how profiles are compared across attribute types and which attribute to use for matching from each type. There are three attribute types you can configure:
Email type
You can choose from Email, BusinessEmail, and PersonalEmail
Phone number type
You can choose from Phone, HomePhone, and MobilePhone
Address type
You can choose from Address, BusinessAddress, MaillingAddress, and ShippingAddress
You can either choose ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel. When choosing MANY_TO_MANY, the system can match attribute across the sub-types of an attribute type. For example, if the value of the Email field of Profile A and the value of BusinessEmail field of Profile B matches, the two profiles are matched on the Email type. When choosing ONE_TO_ONE the system can only match if the sub-types are exact matches. For example, only when the value of the Email field of Profile A and the value of the Email field of Profile B matches, the two profiles are matched on the Email type.
The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.
After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.
The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.
The tags used to organize, track, or control access for this resource.
" @@ -1420,6 +1494,10 @@ "shape":"MatchingResponse", "documentation":"The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.
After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.
The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.
The timestamp of when the domain was created.
" @@ -1983,6 +2061,12 @@ "max":1.0, "min":0.0 }, + "EmailList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":3, + "min":1 + }, "EventStreamDestinationDetails":{ "type":"structure", "required":[ @@ -2471,6 +2555,10 @@ "shape":"MatchingResponse", "documentation":"The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.
After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.
The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.
The timestamp of when the domain was created.
" @@ -2850,6 +2938,76 @@ } } }, + "GetSimilarProfilesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "MatchType", + "SearchKey", + "SearchValue" + ], + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"The pagination token from the previous GetSimilarProfiles API call.
The maximum number of objects returned per page.
", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "documentation":"The unique name of the domain.
", + "location":"uri", + "locationName":"DomainName" + }, + "MatchType":{ + "shape":"MatchType", + "documentation":"Specify the type of matching to get similar profiles for.
" + }, + "SearchKey":{ + "shape":"string1To255", + "documentation":"The string indicating the search key to be used.
" + }, + "SearchValue":{ + "shape":"string1To255", + "documentation":"The string based on SearchKey to be searched for similar profiles.
Set of profileIds that belong to the same matching group.
The string matchId that the similar profiles belong to.
Specify the type of matching to get similar profiles for.
" + }, + "RuleLevel":{ + "shape":"RuleLevel", + "documentation":"The integer rule level that the profiles matched on.
" + }, + "ConfidenceScore":{ + "shape":"Double", + "documentation":"It only has value when the MatchType is ML_BASED_MATCHING.A number between 0 and 1, where a higher score means higher similarity. Examining match confidence scores lets you distinguish between groups of similar records in which the system is highly confident (which you may decide to merge), groups of similar records about which the system is uncertain (which you may decide to have reviewed by a human), and groups of similar records that the system deems to be unlikely (which you may decide to reject). Given confidence scores vary as per the data input, it should not be used as an absolute measure of matching quality.
The pagination token from the previous GetSimilarProfiles API call.
The pagination token from the previous ListRuleBasedMatches API call.
The maximum number of MatchIds returned per page.
The unique name of the domain.
", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "ListRuleBasedMatchesResponse":{ + "type":"structure", + "members":{ + "MatchIds":{ + "shape":"MatchIdList", + "documentation":"The list of MatchIds for the given domain.
The pagination token from the previous ListRuleBasedMatches API call.
The properties that are applied when Marketo is being used as a source.
" }, + "MatchIdList":{ + "type":"list", + "member":{"shape":"string1To255"} + }, "MatchItem":{ "type":"structure", "members":{ @@ -3861,6 +4060,13 @@ }, "documentation":"The Match group object.
" }, + "MatchType":{ + "type":"string", + "enum":[ + "RULE_BASED_MATCHING", + "ML_BASED_MATCHING" + ] + }, "MatchesList":{ "type":"list", "member":{"shape":"MatchItem"} @@ -3922,6 +4128,39 @@ }, "documentation":"The flag that enables the matching process of duplicate profiles.
" }, + "MatchingRule":{ + "type":"structure", + "required":["Rule"], + "members":{ + "Rule":{ + "shape":"MatchingRuleAttributeList", + "documentation":"A single rule level of the MatchRules. Configures how the rule-based matching process should match profiles.
Specifies how does the rule-based matching process should match profiles. You can choose from the following attributes to build the matching Rule:
AccountNumber
Address.Address
Address.City
Address.Country
Address.County
Address.PostalCode
Address.State
Address.Province
BirthDate
BusinessName
EmailAddress
FirstName
Gender
LastName
MiddleName
PhoneNumber
Any customized profile attributes that start with the Attributes
The flag that enables the rule-based matching process of duplicate profiles.
" + }, + "MatchingRules":{ + "shape":"MatchingRules", + "documentation":"Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule in the MatchingRules.
Indicates the maximum allowed rule level.
" + }, + "AttributeTypesSelector":{ + "shape":"AttributeTypesSelector", + "documentation":"Configures information about the AttributeTypesSelector where the rule-based identity resolution uses to match profiles.
The request to enable the rule-based matching.
" + }, + "RuleBasedMatchingResponse":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"optionalBoolean", + "documentation":"The flag that enables the rule-based matching process of duplicate profiles.
" + }, + "MatchingRules":{ + "shape":"MatchingRules", + "documentation":"Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule in the MatchingRules.
PENDING
The first status after configuration a rule-based matching rule. If it is an existing domain, the rule-based Identity Resolution waits one hour before creating the matching rule. If it is a new domain, the system will skip the PENDING stage.
IN_PROGRESS
The system is creating the rule-based matching rule. Under this status, the system is evaluating the existing data and you can no longer change the Rule-based matching configuration.
ACTIVE
The rule is ready to use. You can change the rule a day after the status is in ACTIVE.
Indicates the maximum allowed rule level.
" + }, + "AttributeTypesSelector":{ + "shape":"AttributeTypesSelector", + "documentation":"Configures information about the AttributeTypesSelector where the rule-based identity resolution uses to match profiles.
The response of the Rule-based matching request.
" + }, + "RuleBasedMatchingStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "ACTIVE" + ] + }, + "RuleLevel":{ + "type":"integer", + "max":15, + "min":1 + }, "S3ConnectorOperator":{ "type":"string", "enum":[ @@ -5172,6 +5491,10 @@ "shape":"MatchingRequest", "documentation":"The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.
After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.
The process of matching duplicate profiles using the rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.
The tags used to organize, track, or control access for this resource.
" @@ -5206,6 +5529,10 @@ "shape":"MatchingResponse", "documentation":"The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.
After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.
The process of matching duplicate profiles using the rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.
The timestamp of when the domain was created.
" From c011fe7d4ee71c064cf97be86b86d7a198dd846b Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 25 Jul 2023 18:15:48 +0000 Subject: [PATCH 032/270] Release 2.20.111. Updated CHANGELOG.md, README.md and all pom.xml. --- .changes/2.20.111.json | 90 +++++++++++++++++++ .../bugfix-AWSSDKforJavav2-3bb4487.json | 6 -- .../feature-AWSBillingConductor-409d2bd.json | 6 -- .../feature-AWSDataSync-1cb444c.json | 6 -- .../feature-AWSLambda-bad128e.json | 6 -- .../feature-AWSSecurityHub-599cc29.json | 6 -- ...ature-AWSSecurityTokenService-165da70.json | 6 -- .../feature-AWSTransferFamily-9428e69.json | 6 -- ...AmazonConnectCustomerProfiles-8a1e306.json | 6 -- ...re-AmazonConnectWisdomService-e5cde13.json | 6 -- .../feature-AmazonDynamoDB-981ae1b.json | 6 -- ...ure-AmazonElasticComputeCloud-750fa4b.json | 6 -- ...azonRelationalDatabaseService-76b6211.json | 6 -- ...eature-AmazonSageMakerService-c8218de.json | 6 -- .../feature-EMRServerless-a99eac6.json | 6 -- CHANGELOG.md | 57 ++++++++++++ README.md | 8 +- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 2 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 
+- core/regions/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- 
services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- 
services/datasync/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 +- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- 
services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- 
services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 2 +- 
services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- 
services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- 
third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- 431 files changed, 565 insertions(+), 502 deletions(-) create mode 100644 .changes/2.20.111.json delete mode 100644 .changes/next-release/bugfix-AWSSDKforJavav2-3bb4487.json delete mode 100644 .changes/next-release/feature-AWSBillingConductor-409d2bd.json delete mode 100644 .changes/next-release/feature-AWSDataSync-1cb444c.json delete mode 100644 .changes/next-release/feature-AWSLambda-bad128e.json delete mode 100644 .changes/next-release/feature-AWSSecurityHub-599cc29.json delete mode 100644 .changes/next-release/feature-AWSSecurityTokenService-165da70.json delete mode 100644 .changes/next-release/feature-AWSTransferFamily-9428e69.json delete mode 100644 .changes/next-release/feature-AmazonConnectCustomerProfiles-8a1e306.json delete mode 100644 .changes/next-release/feature-AmazonConnectWisdomService-e5cde13.json delete mode 100644 .changes/next-release/feature-AmazonDynamoDB-981ae1b.json delete mode 100644 .changes/next-release/feature-AmazonElasticComputeCloud-750fa4b.json delete mode 100644 .changes/next-release/feature-AmazonRelationalDatabaseService-76b6211.json delete mode 100644 .changes/next-release/feature-AmazonSageMakerService-c8218de.json delete mode 100644 .changes/next-release/feature-EMRServerless-a99eac6.json diff --git a/.changes/2.20.111.json b/.changes/2.20.111.json new file mode 100644 index 000000000000..f6337e44b747 --- /dev/null +++ b/.changes/2.20.111.json @@ -0,0 +1,90 @@ +{ + "version": "2.20.111", + "date": "2023-07-25", + "entries": [ + { + "type": "bugfix", + "category": "AWS IAM Policy Builder", + "contributor": "", + "description": "Fixed bug where actions were written instead of resources." + }, + { + "type": "feature", + "category": "AWSBillingConductor", + "contributor": "", + "description": "Added support for Auto-Assocate Billing Groups for CreateBillingGroup, UpdateBillingGroup, and ListBillingGroups." 
+ }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "AWS DataSync now supports Microsoft Azure Blob Storage locations." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Add Python 3.11 (python3.11) support to AWS Lambda" + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Add support for CONTAINS and NOT_CONTAINS comparison operators for Automation Rules string filters and map filters" + }, + { + "type": "feature", + "category": "AWS Security Token Service", + "contributor": "", + "description": "API updates for the AWS Security Token Service" + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "This release adds support for SFTP Connectors." + }, + { + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "Amazon Connect Customer Profiles now supports rule-based resolution to match and merge similar profiles into unified profiles, helping companies deliver faster and more personalized customer service by providing access to relevant customer information for agents and automated experiences." + }, + { + "type": "feature", + "category": "Amazon Connect Wisdom Service", + "contributor": "", + "description": "This release added two new data types: AssistantIntegrationConfiguration, and SessionIntegrationConfiguration to support Wisdom integration with Amazon Connect Chat" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "Documentation updates for DynamoDB" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes." 
+ }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release adds support for monitoring storage optimization progress on the DescribeDBInstances API." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API" + }, + { + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "This release adds support for publishing application logs to CloudWatch." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/bugfix-AWSSDKforJavav2-3bb4487.json b/.changes/next-release/bugfix-AWSSDKforJavav2-3bb4487.json deleted file mode 100644 index fe54526399c9..000000000000 --- a/.changes/next-release/bugfix-AWSSDKforJavav2-3bb4487.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "bugfix", - "category": "AWS IAM Policy Builder", - "contributor": "", - "description": "Fixed bug where actions were written instead of resources." -} diff --git a/.changes/next-release/feature-AWSBillingConductor-409d2bd.json b/.changes/next-release/feature-AWSBillingConductor-409d2bd.json deleted file mode 100644 index 37209b57c473..000000000000 --- a/.changes/next-release/feature-AWSBillingConductor-409d2bd.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWSBillingConductor", - "contributor": "", - "description": "Added support for Auto-Assocate Billing Groups for CreateBillingGroup, UpdateBillingGroup, and ListBillingGroups." 
-} diff --git a/.changes/next-release/feature-AWSDataSync-1cb444c.json b/.changes/next-release/feature-AWSDataSync-1cb444c.json deleted file mode 100644 index 52cd67a7986a..000000000000 --- a/.changes/next-release/feature-AWSDataSync-1cb444c.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS DataSync", - "contributor": "", - "description": "AWS DataSync now supports Microsoft Azure Blob Storage locations." -} diff --git a/.changes/next-release/feature-AWSLambda-bad128e.json b/.changes/next-release/feature-AWSLambda-bad128e.json deleted file mode 100644 index 3451a21e296a..000000000000 --- a/.changes/next-release/feature-AWSLambda-bad128e.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Lambda", - "contributor": "", - "description": "Add Python 3.11 (python3.11) support to AWS Lambda" -} diff --git a/.changes/next-release/feature-AWSSecurityHub-599cc29.json b/.changes/next-release/feature-AWSSecurityHub-599cc29.json deleted file mode 100644 index 1203c32734b4..000000000000 --- a/.changes/next-release/feature-AWSSecurityHub-599cc29.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS SecurityHub", - "contributor": "", - "description": "Add support for CONTAINS and NOT_CONTAINS comparison operators for Automation Rules string filters and map filters" -} diff --git a/.changes/next-release/feature-AWSSecurityTokenService-165da70.json b/.changes/next-release/feature-AWSSecurityTokenService-165da70.json deleted file mode 100644 index 5741e0c3253f..000000000000 --- a/.changes/next-release/feature-AWSSecurityTokenService-165da70.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Security Token Service", - "contributor": "", - "description": "API updates for the AWS Security Token Service" -} diff --git a/.changes/next-release/feature-AWSTransferFamily-9428e69.json b/.changes/next-release/feature-AWSTransferFamily-9428e69.json deleted file mode 100644 index 
a7cf474604dd..000000000000 --- a/.changes/next-release/feature-AWSTransferFamily-9428e69.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Transfer Family", - "contributor": "", - "description": "This release adds support for SFTP Connectors." -} diff --git a/.changes/next-release/feature-AmazonConnectCustomerProfiles-8a1e306.json b/.changes/next-release/feature-AmazonConnectCustomerProfiles-8a1e306.json deleted file mode 100644 index 9a5a7e3d66a1..000000000000 --- a/.changes/next-release/feature-AmazonConnectCustomerProfiles-8a1e306.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Connect Customer Profiles", - "contributor": "", - "description": "Amazon Connect Customer Profiles now supports rule-based resolution to match and merge similar profiles into unified profiles, helping companies deliver faster and more personalized customer service by providing access to relevant customer information for agents and automated experiences." 
-} diff --git a/.changes/next-release/feature-AmazonConnectWisdomService-e5cde13.json b/.changes/next-release/feature-AmazonConnectWisdomService-e5cde13.json deleted file mode 100644 index c3ac1cc1c4d6..000000000000 --- a/.changes/next-release/feature-AmazonConnectWisdomService-e5cde13.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Connect Wisdom Service", - "contributor": "", - "description": "This release added two new data types: AssistantIntegrationConfiguration, and SessionIntegrationConfiguration to support Wisdom integration with Amazon Connect Chat" -} diff --git a/.changes/next-release/feature-AmazonDynamoDB-981ae1b.json b/.changes/next-release/feature-AmazonDynamoDB-981ae1b.json deleted file mode 100644 index 688659934481..000000000000 --- a/.changes/next-release/feature-AmazonDynamoDB-981ae1b.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon DynamoDB", - "contributor": "", - "description": "Documentation updates for DynamoDB" -} diff --git a/.changes/next-release/feature-AmazonElasticComputeCloud-750fa4b.json b/.changes/next-release/feature-AmazonElasticComputeCloud-750fa4b.json deleted file mode 100644 index 56269bc4d278..000000000000 --- a/.changes/next-release/feature-AmazonElasticComputeCloud-750fa4b.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Elastic Compute Cloud", - "contributor": "", - "description": "This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes." 
-} diff --git a/.changes/next-release/feature-AmazonRelationalDatabaseService-76b6211.json b/.changes/next-release/feature-AmazonRelationalDatabaseService-76b6211.json deleted file mode 100644 index f199255aeb04..000000000000 --- a/.changes/next-release/feature-AmazonRelationalDatabaseService-76b6211.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Relational Database Service", - "contributor": "", - "description": "This release adds support for monitoring storage optimization progress on the DescribeDBInstances API." -} diff --git a/.changes/next-release/feature-AmazonSageMakerService-c8218de.json b/.changes/next-release/feature-AmazonSageMakerService-c8218de.json deleted file mode 100644 index 51f73789ffa0..000000000000 --- a/.changes/next-release/feature-AmazonSageMakerService-c8218de.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon SageMaker Service", - "contributor": "", - "description": "Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API" -} diff --git a/.changes/next-release/feature-EMRServerless-a99eac6.json b/.changes/next-release/feature-EMRServerless-a99eac6.json deleted file mode 100644 index 847926e5532e..000000000000 --- a/.changes/next-release/feature-EMRServerless-a99eac6.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "EMR Serverless", - "contributor": "", - "description": "This release adds support for publishing application logs to CloudWatch." -} diff --git a/CHANGELOG.md b/CHANGELOG.md index 0839e85ecda1..ec8d505f0dc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,60 @@ +# __2.20.111__ __2023-07-25__ +## __AWS DataSync__ + - ### Features + - AWS DataSync now supports Microsoft Azure Blob Storage locations. + +## __AWS IAM Policy Builder__ + - ### Bugfixes + - Fixed bug where actions were written instead of resources. 
+ +## __AWS Lambda__ + - ### Features + - Add Python 3.11 (python3.11) support to AWS Lambda + +## __AWS Security Token Service__ + - ### Features + - API updates for the AWS Security Token Service + +## __AWS SecurityHub__ + - ### Features + - Add support for CONTAINS and NOT_CONTAINS comparison operators for Automation Rules string filters and map filters + +## __AWS Transfer Family__ + - ### Features + - This release adds support for SFTP Connectors. + +## __AWSBillingConductor__ + - ### Features + - Added support for Auto-Assocate Billing Groups for CreateBillingGroup, UpdateBillingGroup, and ListBillingGroups. + +## __Amazon Connect Customer Profiles__ + - ### Features + - Amazon Connect Customer Profiles now supports rule-based resolution to match and merge similar profiles into unified profiles, helping companies deliver faster and more personalized customer service by providing access to relevant customer information for agents and automated experiences. + +## __Amazon Connect Wisdom Service__ + - ### Features + - This release added two new data types: AssistantIntegrationConfiguration, and SessionIntegrationConfiguration to support Wisdom integration with Amazon Connect Chat + +## __Amazon DynamoDB__ + - ### Features + - Documentation updates for DynamoDB + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes. + +## __Amazon Relational Database Service__ + - ### Features + - This release adds support for monitoring storage optimization progress on the DescribeDBInstances API. + +## __Amazon SageMaker Service__ + - ### Features + - Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API + +## __EMR Serverless__ + - ### Features + - This release adds support for publishing application logs to CloudWatch. 
+ # __2.20.110__ __2023-07-24__ ## __AWS CloudFormation__ - ### Features diff --git a/README.md b/README.md index a1bbdae7a5a2..cdb7f0f77fda 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same verCreates a Data Store that can ingest and export FHIR formatted data.
" + "documentation":"Creates a data store that can ingest and export FHIR formatted data.
" }, "DeleteFHIRDatastore":{ "name":"DeleteFHIRDatastore", @@ -46,7 +46,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"Deletes a Data Store.
" + "documentation":"Deletes a data store.
" }, "DescribeFHIRDatastore":{ "name":"DescribeFHIRDatastore", @@ -62,7 +62,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"Gets the properties associated with the FHIR Data Store, including the Data Store ID, Data Store ARN, Data Store name, Data Store status, created at, Data Store type version, and Data Store endpoint.
" + "documentation":"Gets the properties associated with the FHIR data store, including the data store ID, data store ARN, data store name, data store status, when the data store was created, data store type version, and the data store's endpoint.
" }, "DescribeFHIRExportJob":{ "name":"DescribeFHIRExportJob", @@ -109,7 +109,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"Lists all FHIR Data Stores that are in the user’s account, regardless of Data Store status.
" + "documentation":"Lists all FHIR data stores that are in the user’s account, regardless of data store status.
" }, "ListFHIRExportJobs":{ "name":"ListFHIRExportJobs", @@ -157,7 +157,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Returns a list of all existing tags associated with a Data Store.
" + "documentation":"Returns a list of all existing tags associated with a data store.
" }, "StartFHIRExportJob":{ "name":"StartFHIRExportJob", @@ -205,7 +205,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Adds a user specified key and value tag to a Data Store.
" + "documentation":"Adds a user specified key and value tag to a data store.
" }, "UntagResource":{ "name":"UntagResource", @@ -219,7 +219,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"Removes tags from a Data Store.
" + "documentation":"Removes tags from a data store.
" } }, "shapes":{ @@ -270,7 +270,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"The Data Store is in a transition state and the user requested action can not be performed.
", + "documentation":"The data store is in a transition state and the user requested action can not be performed.
", "exception":true }, "CreateFHIRDatastoreRequest":{ @@ -279,19 +279,19 @@ "members":{ "DatastoreName":{ "shape":"DatastoreName", - "documentation":"The user generated name for the Data Store.
" + "documentation":"The user generated name for the data store.
" }, "DatastoreTypeVersion":{ "shape":"FHIRVersion", - "documentation":"The FHIR version of the Data Store. The only supported version is R4.
" + "documentation":"The FHIR version of the data store. The only supported version is R4.
" }, "SseConfiguration":{ "shape":"SseConfiguration", - "documentation":"The server-side encryption key configuration for a customer provided encryption key specified for creating a Data Store.
" + "documentation":"The server-side encryption key configuration for a customer provided encryption key specified for creating a data store.
" }, "PreloadDataConfig":{ "shape":"PreloadDataConfig", - "documentation":"Optional parameter to preload data upon creation of the Data Store. Currently, the only supported preloaded data is synthetic data generated from Synthea.
" + "documentation":"Optional parameter to preload data upon creation of the data store. Currently, the only supported preloaded data is synthetic data generated from Synthea.
" }, "ClientToken":{ "shape":"ClientTokenString", @@ -300,11 +300,11 @@ }, "Tags":{ "shape":"TagList", - "documentation":"Resource tags that are applied to a Data Store when it is created.
" + "documentation":"Resource tags that are applied to a data store when it is created.
" }, "IdentityProviderConfiguration":{ "shape":"IdentityProviderConfiguration", - "documentation":"The configuration of the identity provider that you want to use for your Data Store.
" + "documentation":"The configuration of the identity provider that you want to use for your data store.
" } } }, @@ -319,19 +319,19 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS-generated Data Store id. This id is in the output from the initial Data Store creation call.
" + "documentation":"The AWS-generated data store id. This id is in the output from the initial data store creation call.
" }, "DatastoreArn":{ "shape":"DatastoreArn", - "documentation":"The Data Store ARN is generated during the creation of the Data Store and can be found in the output from the initial Data Store creation call.
" + "documentation":"The data store ARN is generated during the creation of the data store and can be found in the output from the initial data store creation call.
" }, "DatastoreStatus":{ "shape":"DatastoreStatus", - "documentation":"The status of the FHIR Data Store. Possible statuses are ‘CREATING’, ‘ACTIVE’, ‘DELETING’, ‘DELETED’.
" + "documentation":"The status of the FHIR data store.
" }, "DatastoreEndpoint":{ "shape":"BoundedLengthString", - "documentation":"The AWS endpoint for the created Data Store.
" + "documentation":"The AWS endpoint for the created data store.
" } } }, @@ -344,22 +344,22 @@ "members":{ "DatastoreName":{ "shape":"DatastoreName", - "documentation":"Allows the user to filter Data Store results by name.
" + "documentation":"Allows the user to filter data store results by name.
" }, "DatastoreStatus":{ "shape":"DatastoreStatus", - "documentation":"Allows the user to filter Data Store results by status.
" + "documentation":"Allows the user to filter data store results by status.
" }, "CreatedBefore":{ "shape":"Timestamp", - "documentation":"A filter that allows the user to set cutoff dates for records. All Data Stores created before the specified date will be included in the results.
" + "documentation":"A filter that allows the user to set cutoff dates for records. All data stores created before the specified date will be included in the results.
" }, "CreatedAfter":{ "shape":"Timestamp", - "documentation":"A filter that allows the user to set cutoff dates for records. All Data Stores created after the specified date will be included in the results.
" + "documentation":"A filter that allows the user to set cutoff dates for records. All data stores created after the specified date will be included in the results.
" } }, - "documentation":"The filters applied to Data Store query.
" + "documentation":"The filters applied to data store query.
" }, "DatastoreId":{ "type":"string", @@ -385,23 +385,23 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS-generated ID number for the Data Store.
" + "documentation":"The AWS-generated ID number for the data store.
" }, "DatastoreArn":{ "shape":"DatastoreArn", - "documentation":"The Amazon Resource Name used in the creation of the Data Store.
" + "documentation":"The Amazon Resource Name used in the creation of the data store.
" }, "DatastoreName":{ "shape":"DatastoreName", - "documentation":"The user-generated name for the Data Store.
" + "documentation":"The user-generated name for the data store.
" }, "DatastoreStatus":{ "shape":"DatastoreStatus", - "documentation":"The status of the Data Store. Possible statuses are 'CREATING', 'ACTIVE', 'DELETING', or 'DELETED'.
" + "documentation":"The status of the data store.
" }, "CreatedAt":{ "shape":"Timestamp", - "documentation":"The time that a Data Store was created.
" + "documentation":"The time that a data store was created.
" }, "DatastoreTypeVersion":{ "shape":"FHIRVersion", @@ -409,7 +409,7 @@ }, "DatastoreEndpoint":{ "shape":"String", - "documentation":"The AWS endpoint for the Data Store. Each Data Store will have it's own endpoint with Data Store ID in the endpoint URL.
" + "documentation":"The AWS endpoint for the data store. Each data store will have it's own endpoint with data store ID in the endpoint URL.
" }, "SseConfiguration":{ "shape":"SseConfiguration", @@ -417,14 +417,14 @@ }, "PreloadDataConfig":{ "shape":"PreloadDataConfig", - "documentation":"The preloaded data configuration for the Data Store. Only data preloaded from Synthea is supported.
" + "documentation":"The preloaded data configuration for the data store. Only data preloaded from Synthea is supported.
" }, "IdentityProviderConfiguration":{ "shape":"IdentityProviderConfiguration", - "documentation":"The identity provider that you selected when you created the Data Store.
" + "documentation":"The identity provider that you selected when you created the data store.
" } }, - "documentation":"Displays the properties of the Data Store, including the ID, ARN, name, and the status of the Data Store.
" + "documentation":"Displays the properties of the data store, including the ID, ARN, name, and the status of the data store.
" }, "DatastorePropertiesList":{ "type":"list", @@ -445,7 +445,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS-generated ID for the Data Store to be deleted.
" + "documentation":"The AWS-generated ID for the data store to be deleted.
" } } }, @@ -460,19 +460,19 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS-generated ID for the Data Store to be deleted.
" + "documentation":"The AWS-generated ID for the data store to be deleted.
" }, "DatastoreArn":{ "shape":"DatastoreArn", - "documentation":"The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission.
" + "documentation":"The Amazon Resource Name (ARN) that gives AWS HealthLake access permission.
" }, "DatastoreStatus":{ "shape":"DatastoreStatus", - "documentation":"The status of the Data Store that the user has requested to be deleted.
" + "documentation":"The status of the data store that the user has requested to be deleted.
" }, "DatastoreEndpoint":{ "shape":"BoundedLengthString", - "documentation":"The AWS endpoint for the Data Store the user has requested to be deleted.
" + "documentation":"The AWS endpoint for the data store the user has requested to be deleted.
" } } }, @@ -482,7 +482,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS-generated Data Store ID.
" + "documentation":"The AWS-generated data store ID.
" } } }, @@ -492,7 +492,7 @@ "members":{ "DatastoreProperties":{ "shape":"DatastoreProperties", - "documentation":"All properties associated with a Data Store, including the Data Store ID, Data Store ARN, Data Store name, Data Store status, created at, Data Store type version, and Data Store endpoint.
" + "documentation":"All properties associated with a data store, including the data store ID, data store ARN, data store name, data store status, when the data store was created, data store type version, and the data store's endpoint.
" } } }, @@ -505,7 +505,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS generated ID for the Data Store from which files are being exported from for an export job.
" + "documentation":"The AWS generated ID for the data store from which files are being exported from for an export job.
" }, "JobId":{ "shape":"JobId", @@ -532,7 +532,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS-generated ID of the Data Store.
" + "documentation":"The AWS-generated ID of the data store.
" }, "JobId":{ "shape":"JobId", @@ -588,7 +588,7 @@ }, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS generated ID for the Data Store from which files are being exported for an export job.
" + "documentation":"The AWS generated ID for the data store from which files are being exported for an export job.
" }, "OutputDataConfig":{ "shape":"OutputDataConfig", @@ -625,11 +625,11 @@ "members":{ "AuthorizationStrategy":{ "shape":"AuthorizationStrategy", - "documentation":"The authorization strategy that you selected when you created the Data Store.
" + "documentation":"The authorization strategy that you selected when you created the data store.
" }, "FineGrainedAuthorizationEnabled":{ "shape":"Boolean", - "documentation":"If you enabled fine-grained authorization when you created the Data Store.
" + "documentation":"If you enabled fine-grained authorization when you created the data store.
" }, "Metadata":{ "shape":"ConfigurationMetadata", @@ -640,7 +640,7 @@ "documentation":"The Amazon Resource Name (ARN) of the Lambda function that you want to use to decode the access token created by the authorization server.
" } }, - "documentation":"The identity provider configuration that you gave when the Data Store was created.
" + "documentation":"The identity provider configuration that you gave when the data store was created.
" }, "ImportJobProperties":{ "type":"structure", @@ -683,14 +683,14 @@ "JobOutputDataConfig":{"shape":"OutputDataConfig"}, "DataAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"The Amazon Resource Name (ARN) that gives Amazon HealthLake access to your input data.
" + "documentation":"The Amazon Resource Name (ARN) that gives AWS HealthLake access to your input data.
" }, "Message":{ "shape":"Message", "documentation":"An explanation of any errors that may have occurred during the FHIR import job.
" } }, - "documentation":"Displays the properties of the import job, including the ID, Arn, Name, and the status of the Data Store.
" + "documentation":"Displays the properties of the import job, including the ID, Arn, Name, and the status of the data store.
" }, "ImportJobPropertiesList":{ "type":"list", @@ -701,7 +701,7 @@ "members":{ "S3Uri":{ "shape":"S3Uri", - "documentation":"The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.
" + "documentation":"The S3Uri is the user specified S3 location of the FHIR data to be imported into AWS HealthLake.
" } }, "documentation":"The input properties for an import job.
", @@ -748,14 +748,14 @@ "members":{ "CmkType":{ "shape":"CmkType", - "documentation":"The type of customer-managed-key(CMK) used for encyrption. The two types of supported CMKs are customer owned CMKs and AWS owned CMKs.
" + "documentation":"The type of customer-managed-key(CMK) used for encryption. The two types of supported CMKs are customer owned CMKs and AWS owned CMKs.
" }, "KmsKeyId":{ "shape":"EncryptionKeyID", - "documentation":"The KMS encryption key id/alias used to encrypt the Data Store contents at rest.
" + "documentation":"The KMS encryption key id/alias used to encrypt the data store contents at rest.
" } }, - "documentation":"The customer-managed-key(CMK) used when creating a Data Store. If a customer owned key is not specified, an AWS owned key will be used for encryption.
" + "documentation":"The customer-managed-key(CMK) used when creating a data store. If a customer owned key is not specified, an AWS owned key will be used for encryption.
" }, "LambdaArn":{ "type":"string", @@ -768,15 +768,15 @@ "members":{ "Filter":{ "shape":"DatastoreFilter", - "documentation":"Lists all filters associated with a FHIR Data Store request.
" + "documentation":"Lists all filters associated with a FHIR data store request.
" }, "NextToken":{ "shape":"NextToken", - "documentation":"Fetches the next page of Data Stores when results are paginated.
" + "documentation":"Fetches the next page of data stores when results are paginated.
" }, "MaxResults":{ "shape":"MaxResultsInteger", - "documentation":"The maximum number of Data Stores returned in a single page of a ListFHIRDatastoresRequest call.
" + "documentation":"The maximum number of data stores returned in a single page of a ListFHIRDatastoresRequest call.
" } } }, @@ -786,7 +786,7 @@ "members":{ "DatastorePropertiesList":{ "shape":"DatastorePropertiesList", - "documentation":"All properties associated with the listed Data Stores.
" + "documentation":"All properties associated with the listed data stores.
" }, "NextToken":{ "shape":"NextToken", @@ -800,7 +800,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"This parameter limits the response to the export job with the specified Data Store ID.
" + "documentation":"This parameter limits the response to the export job with the specified data store ID.
" }, "NextToken":{ "shape":"NextToken", @@ -848,7 +848,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"This parameter limits the response to the import job with the specified Data Store ID.
" + "documentation":"This parameter limits the response to the import job with the specified data store ID.
" }, "NextToken":{ "shape":"NextToken", @@ -896,7 +896,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"The Amazon Resource Name(ARN) of the Data Store for which tags are being added.
" + "documentation":"The Amazon Resource Name(ARN) of the data store for which tags are being added.
" } } }, @@ -905,7 +905,7 @@ "members":{ "Tags":{ "shape":"TagList", - "documentation":"Returns a list of tags associated with a Data Store.
" + "documentation":"Returns a list of tags associated with a data store.
" } } }, @@ -945,7 +945,7 @@ "documentation":"The type of preloaded data. Only Synthea preloaded data is supported.
" } }, - "documentation":"The input properties for the preloaded Data Store. Only data preloaded from Synthea is supported.
" + "documentation":"The input properties for the preloaded data store. Only data preloaded from Synthea is supported.
" }, "PreloadDataType":{ "type":"string", @@ -956,7 +956,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"The requested Data Store was not found.
", + "documentation":"The requested data store was not found.
", "exception":true }, "S3Configuration":{ @@ -968,7 +968,7 @@ "members":{ "S3Uri":{ "shape":"S3Uri", - "documentation":"The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.
" + "documentation":"The S3Uri is the user specified S3 location of the FHIR data to be imported into AWS HealthLake.
" }, "KmsKeyId":{ "shape":"EncryptionKeyID", @@ -1012,7 +1012,7 @@ }, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS generated ID for the Data Store from which files are being exported for an export job.
" + "documentation":"The AWS generated ID for the data store from which files are being exported for an export job.
" }, "DataAccessRoleArn":{ "shape":"IamRoleArn", @@ -1042,7 +1042,7 @@ }, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS generated ID for the Data Store from which files are being exported for an export job.
" + "documentation":"The AWS generated ID for the data store from which files are being exported for an export job.
" } } }, @@ -1067,11 +1067,11 @@ "JobOutputDataConfig":{"shape":"OutputDataConfig"}, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS-generated Data Store ID.
" + "documentation":"The AWS-generated data store ID.
" }, "DataAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission.
" + "documentation":"The Amazon Resource Name (ARN) that gives AWS HealthLake access permission.
" }, "ClientToken":{ "shape":"ClientTokenString", @@ -1097,7 +1097,7 @@ }, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"The AWS-generated Data Store ID.
" + "documentation":"The AWS-generated data store ID.
" } } }, @@ -1151,11 +1151,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"The Amazon Resource Name(ARN)that gives Amazon HealthLake access to the Data Store which tags are being added to.
" + "documentation":"The Amazon Resource Name(ARN)that gives AWS HealthLake access to the data store which tags are being added to.
" }, "Tags":{ "shape":"TagList", - "documentation":"The user specified key and value pair tags being added to a Data Store.
" + "documentation":"The user specified key and value pair tags being added to a data store.
" } } }, @@ -1188,11 +1188,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"\"The Amazon Resource Name(ARN) of the Data Store for which tags are being removed
" + "documentation":"The Amazon Resource Name(ARN) of the data store for which tags are being removed.
" }, "TagKeys":{ "shape":"TagKeyList", - "documentation":"The keys for the tags to be removed from the Healthlake Data Store.
" + "documentation":"The keys for the tags to be removed from the HealthLake data store.
" } } }, @@ -1210,5 +1210,5 @@ "exception":true } }, - "documentation":"Amazon HealthLake is a HIPAA eligibile service that allows customers to store, transform, query, and analyze their FHIR-formatted data in a consistent fashion in the cloud.
" + "documentation":"AWS HealthLake is a HIPAA eligibile service that allows customers to store, transform, query, and analyze their FHIR-formatted data in a consistent fashion in the cloud.
" } From a94dd2061623376cef6abf31706dd544644ef20b Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:24:46 +0000 Subject: [PATCH 035/270] Amazon Omics Update: The service is renaming as a part of AWS Health. --- .../feature-AmazonOmics-77634b7.json | 6 ++ .../codegen-resources/endpoint-tests.json | 86 +++++++++---------- .../codegen-resources/service-2.json | 2 +- 3 files changed, 50 insertions(+), 44 deletions(-) create mode 100644 .changes/next-release/feature-AmazonOmics-77634b7.json diff --git a/.changes/next-release/feature-AmazonOmics-77634b7.json b/.changes/next-release/feature-AmazonOmics-77634b7.json new file mode 100644 index 000000000000..6bc9604671b3 --- /dev/null +++ b/.changes/next-release/feature-AmazonOmics-77634b7.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "The service is renaming as a part of AWS Health." +} diff --git a/services/omics/src/main/resources/codegen-resources/endpoint-tests.json b/services/omics/src/main/resources/codegen-resources/endpoint-tests.json index 8ff05e264f8e..de9c6b650010 100644 --- a/services/omics/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/omics/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseFIPS": true } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseFIPS": true } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseFIPS": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseFIPS": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": 
"cn-north-1" + "Region": "cn-north-1", + "UseFIPS": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseFIPS": true } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseFIPS": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseFIPS": false } }, { @@ -110,9 +110,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseFIPS": true } }, { @@ -123,9 +123,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseFIPS": true } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseFIPS": false } }, { @@ -147,9 +147,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseFIPS": false } }, { @@ -160,9 +160,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseFIPS": true } }, { @@ -173,9 +173,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseFIPS": true } }, { @@ -186,9 +186,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseFIPS": false } }, { @@ -199,9 +199,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseFIPS": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack 
are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseFIPS": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseFIPS": true, "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseFIPS": true } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseFIPS": false } }, { @@ -247,9 +247,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseFIPS": false } }, { @@ -260,9 +260,9 @@ } }, "params": { - "UseFIPS": false, "UseDualStack": false, "Region": "us-east-1", + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "UseDualStack": false, "Region": "us-east-1", + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "UseDualStack": true, "Region": "us-east-1", + "UseFIPS": false, "Endpoint": "https://example.com" } } diff --git a/services/omics/src/main/resources/codegen-resources/service-2.json b/services/omics/src/main/resources/codegen-resources/service-2.json index 6279f1c3f237..02478977fc0c 100644 --- a/services/omics/src/main/resources/codegen-resources/service-2.json +++ b/services/omics/src/main/resources/codegen-resources/service-2.json @@ -8267,5 +8267,5 @@ "min":1 } }, - "documentation":"This is the Amazon Omics API Reference. For an introduction to the service, see What is Amazon Omics? in the Amazon Omics User Guide.
" + "documentation":"This is the AWS HealthOmics API Reference. For an introduction to the service, see What is AWS HealthOmics? in the AWS HealthOmics User Guide.
" } From 98f4031590429c055367dbe504a876007f5bdfb6 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:24:47 +0000 Subject: [PATCH 036/270] Amazon Polly Update: Amazon Polly adds 1 new voice - Lisa (nl-BE) --- .changes/next-release/feature-AmazonPolly-c852dc1.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 6 ++++-- 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 .changes/next-release/feature-AmazonPolly-c852dc1.json diff --git a/.changes/next-release/feature-AmazonPolly-c852dc1.json b/.changes/next-release/feature-AmazonPolly-c852dc1.json new file mode 100644 index 000000000000..ee3b323d9b65 --- /dev/null +++ b/.changes/next-release/feature-AmazonPolly-c852dc1.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds 1 new voice - Lisa (nl-BE)" +} diff --git a/services/polly/src/main/resources/codegen-resources/service-2.json b/services/polly/src/main/resources/codegen-resources/service-2.json index 0a69ffffcb17..7b50fd662216 100644 --- a/services/polly/src/main/resources/codegen-resources/service-2.json +++ b/services/polly/src/main/resources/codegen-resources/service-2.json @@ -422,7 +422,8 @@ "yue-CN", "ar-AE", "fi-FI", - "en-IE" + "en-IE", + "nl-BE" ] }, "LanguageCodeList":{ @@ -1104,7 +1105,8 @@ "Kazuha", "Tomoko", "Niamh", - "Sofie" + "Sofie", + "Lisa" ] }, "VoiceList":{ From cb2af1c20885b687dc0ee58f59b36ea682113d97 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:24:50 +0000 Subject: [PATCH 037/270] OpenSearch Service Serverless Update: This release adds new collection type VectorSearch. 
--- .../feature-OpenSearchServiceServerless-3bc0d2a.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 7 ++++--- 2 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 .changes/next-release/feature-OpenSearchServiceServerless-3bc0d2a.json diff --git a/.changes/next-release/feature-OpenSearchServiceServerless-3bc0d2a.json b/.changes/next-release/feature-OpenSearchServiceServerless-3bc0d2a.json new file mode 100644 index 000000000000..995a4f29244c --- /dev/null +++ b/.changes/next-release/feature-OpenSearchServiceServerless-3bc0d2a.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "OpenSearch Service Serverless", + "contributor": "", + "description": "This release adds new collection type VectorSearch." +} diff --git a/services/opensearchserverless/src/main/resources/codegen-resources/service-2.json b/services/opensearchserverless/src/main/resources/codegen-resources/service-2.json index 5e39befc357c..506aa77f3291 100644 --- a/services/opensearchserverless/src/main/resources/codegen-resources/service-2.json +++ b/services/opensearchserverless/src/main/resources/codegen-resources/service-2.json @@ -821,7 +821,8 @@ "type":"string", "enum":[ "SEARCH", - "TIMESERIES" + "TIMESERIES", + "VECTORSEARCH" ] }, "ConfigDescription":{ @@ -1627,7 +1628,7 @@ "documentation":"Description of the error.
" } }, - "documentation":"OCU Limit Exceeded for service limits
", + "documentation":"Thrown when the collection you're attempting to create results in a number of search or indexing OCUs that exceeds the account limit.
", "exception":true }, "PolicyDescription":{ @@ -2435,7 +2436,7 @@ }, "samlMetadata":{ "type":"string", - "max":20480, + "max":51200, "min":1, "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u00A1-\\u00FF]+" }, From ef450b6b458e1c3631a121bc941d72ff7948ef9c Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:24:50 +0000 Subject: [PATCH 038/270] Amazon Managed Blockchain Query Update: Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs. --- ...-AmazonManagedBlockchainQuery-b049e7a.json | 6 + services/managedblockchainquery/pom.xml | 60 + .../codegen-resources/endpoint-rule-set.json | 350 ++++++ .../codegen-resources/endpoint-tests.json | 314 +++++ .../codegen-resources/paginators-1.json | 22 + .../codegen-resources/service-2.json | 1018 +++++++++++++++++ .../codegen-resources/waiters-2.json | 5 + 7 files changed, 1775 insertions(+) create mode 100644 .changes/next-release/feature-AmazonManagedBlockchainQuery-b049e7a.json create mode 100644 services/managedblockchainquery/pom.xml create mode 100644 services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-rule-set.json create mode 100644 services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-tests.json create mode 100644 services/managedblockchainquery/src/main/resources/codegen-resources/paginators-1.json create mode 100644 services/managedblockchainquery/src/main/resources/codegen-resources/service-2.json create mode 100644 services/managedblockchainquery/src/main/resources/codegen-resources/waiters-2.json diff --git a/.changes/next-release/feature-AmazonManagedBlockchainQuery-b049e7a.json b/.changes/next-release/feature-AmazonManagedBlockchainQuery-b049e7a.json new file mode 100644 index 000000000000..6dc090e0ef34 --- /dev/null +++ b/.changes/next-release/feature-AmazonManagedBlockchainQuery-b049e7a.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Managed 
Blockchain Query", + "contributor": "", + "description": "Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs." +} diff --git a/services/managedblockchainquery/pom.xml b/services/managedblockchainquery/pom.xml new file mode 100644 index 000000000000..9e1f9da7243c --- /dev/null +++ b/services/managedblockchainquery/pom.xml @@ -0,0 +1,60 @@ + + + +Gets the token balance for a batch of tokens by using the GetTokenBalance action for every token in the request.
Only the native tokens BTC,ETH, and the ERC-20, ERC-721, and ERC 1155 token standards are supported.
Gets the balance of a specific token, including native tokens, for a given address (wallet or contract) on the blockchain.
Only the native tokens BTC,ETH, and the ERC-20, ERC-721, and ERC 1155 token standards are supported.
Get the details of a transaction.
" + }, + "ListTokenBalances":{ + "name":"ListTokenBalances", + "http":{ + "method":"POST", + "requestUri":"/list-token-balances", + "responseCode":200 + }, + "input":{"shape":"ListTokenBalancesInput"}, + "output":{"shape":"ListTokenBalancesOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"This action returns the following for a given a blockchain network:
Lists all token balances owned by an address (either a contact address or a wallet address).
Lists all token balances for all tokens created by a contract.
Lists all token balances for a given token.
You must always specify the network property of the tokenFilter when using this operation.
An array of TransactionEvent objects. Each object contains details about the transaction event.
Lists all of the transactions on a given wallet address or to a specific contract.
" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"The container for the exception message.
" + } + }, + "documentation":"The Amazon Web Services account doesn’t have access to this resource.
", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "BatchGetTokenBalanceErrorItem":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "errorType" + ], + "members":{ + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "errorCode":{ + "shape":"String", + "documentation":"The error code associated with the error.
" + }, + "errorMessage":{ + "shape":"String", + "documentation":"The message associated with the error.
" + }, + "errorType":{ + "shape":"ErrorType", + "documentation":"The type of error.
" + } + }, + "documentation":"Error generated from a failed BatchGetTokenBalance request.
An array of GetTokenBalanceInput objects whose balance is being requested.
The container for the input for getting a token balance.
" + }, + "BatchGetTokenBalanceOutput":{ + "type":"structure", + "required":[ + "tokenBalances", + "errors" + ], + "members":{ + "tokenBalances":{ + "shape":"BatchGetTokenBalanceOutputList", + "documentation":"An array of BatchGetTokenBalanceOutputItem objects returned by the response.
An array of BatchGetTokenBalanceErrorItem objects returned from the request.
The container for the token balance.
" + }, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "lastUpdatedTime":{"shape":"BlockchainInstant"} + }, + "documentation":"The container for the properties of a token balance output.
" + }, + "BatchGetTokenBalanceOutputList":{ + "type":"list", + "member":{"shape":"BatchGetTokenBalanceOutputItem"}, + "max":10, + "min":0 + }, + "BlockHash":{ + "type":"string", + "pattern":"(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" + }, + "BlockchainInstant":{ + "type":"structure", + "members":{ + "time":{ + "shape":"Timestamp", + "documentation":"The container of the Timestamp of the blockchain instant.
This timestamp will only be recorded up to the second.
The container for time.
" + }, + "ChainAddress":{ + "type":"string", + "pattern":"[-A-Za-z0-9]{13,74}" + }, + "ErrorType":{ + "type":"string", + "enum":[ + "VALIDATION_EXCEPTION", + "RESOURCE_NOT_FOUND_EXCEPTION" + ] + }, + "ExceptionMessage":{ + "type":"string", + "min":1 + }, + "GetTokenBalanceInput":{ + "type":"structure", + "required":[ + "tokenIdentifier", + "ownerIdentifier" + ], + "members":{ + "tokenIdentifier":{ + "shape":"TokenIdentifier", + "documentation":"The container for the identifier for the token, including the unique token ID and its blockchain network.
" + }, + "ownerIdentifier":{ + "shape":"OwnerIdentifier", + "documentation":"The container for the identifier for the owner.
" + }, + "atBlockchainInstant":{ + "shape":"BlockchainInstant", + "documentation":"The time for when the TokenBalance is requested or the current time if a time is not provided in the request.
This time will only be recorded up to the second.
The container for the token balance.
" + }, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "lastUpdatedTime":{"shape":"BlockchainInstant"} + } + }, + "GetTransactionInput":{ + "type":"structure", + "required":[ + "transactionHash", + "network" + ], + "members":{ + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + }, + "network":{ + "shape":"QueryNetwork", + "documentation":"The blockchain network where the transaction occurred.
" + } + } + }, + "GetTransactionOutput":{ + "type":"structure", + "required":["transaction"], + "members":{ + "transaction":{ + "shape":"Transaction", + "documentation":"Contains the details of the transaction.
" + } + } + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"The container for the exception message.
" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"The container of the retryAfterSeconds value.
The request processing has failed because of an internal error in the service.
", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListTokenBalancesInput":{ + "type":"structure", + "required":["tokenFilter"], + "members":{ + "ownerFilter":{ + "shape":"OwnerFilter", + "documentation":"The contract or wallet address on the blockchain network by which to filter the request. You must specify the address property of the ownerFilter when listing balances of tokens owned by the address.
The contract address or a token identifier on the blockchain network by which to filter the request. You must specify the contractAddress property of this container when listing tokens minted by a contract.
You must always specify the network property of this container when using this operation.
The pagination token that indicates the next set of results to retrieve.
" + }, + "maxResults":{ + "shape":"ListTokenBalancesInputMaxResultsInteger", + "documentation":"The maximum number of token balances to return.
" + } + } + }, + "ListTokenBalancesInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":250, + "min":1 + }, + "ListTokenBalancesOutput":{ + "type":"structure", + "required":["tokenBalances"], + "members":{ + "tokenBalances":{ + "shape":"TokenBalanceList", + "documentation":"An array of TokenBalance objects. Each object contains details about the token balance.
The pagination token that indicates the next set of results to retrieve.
" + } + } + }, + "ListTransactionEventsInput":{ + "type":"structure", + "required":[ + "transactionHash", + "network" + ], + "members":{ + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + }, + "network":{ + "shape":"QueryNetwork", + "documentation":"The blockchain network where the transaction events occurred.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The pagination token that indicates the next set of results to retrieve.
" + }, + "maxResults":{ + "shape":"ListTransactionEventsInputMaxResultsInteger", + "documentation":"The maximum number of transaction events to list.
Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return
An array of TransactionEvent objects. Each object contains details about the transaction events.
The pagination token that indicates the next set of results to retrieve.
" + } + } + }, + "ListTransactionsInput":{ + "type":"structure", + "required":[ + "address", + "network" + ], + "members":{ + "address":{ + "shape":"ChainAddress", + "documentation":"The address (either a contract or wallet), whose transactions are being requested.
" + }, + "network":{ + "shape":"QueryNetwork", + "documentation":"The blockchain network where the transactions occurred.
" + }, + "fromBlockchainInstant":{"shape":"BlockchainInstant"}, + "toBlockchainInstant":{"shape":"BlockchainInstant"}, + "sort":{ + "shape":"ListTransactionsSort", + "documentation":"Sorts items in an ascending order if the first page starts at fromTime. Sorts items in a descending order if the first page starts at toTime.
The pagination token that indicates the next set of results to retrieve.
" + }, + "maxResults":{ + "shape":"ListTransactionsInputMaxResultsInteger", + "documentation":"The maximum number of transactions to list.
Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.
To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return
The array of transactions returned by the request.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The pagination token that indicates the next set of results to retrieve.
" + } + } + }, + "ListTransactionsSort":{ + "type":"structure", + "members":{ + "sortBy":{ + "shape":"ListTransactionsSortBy", + "documentation":"Defaults to the value TRANSACTION_TIMESTAMP.
The container for the sort order for ListTransactions. The SortOrder field only accepts the values ASCENDING and DESCENDING. Not providing SortOrder will default to ASCENDING.
The container for determining how the list transaction result will be sorted.
" + }, + "ListTransactionsSortBy":{ + "type":"string", + "enum":["TRANSACTION_TIMESTAMP"] + }, + "Long":{ + "type":"long", + "box":true + }, + "NextToken":{ + "type":"string", + "max":131070, + "min":0 + }, + "OwnerFilter":{ + "type":"structure", + "required":["address"], + "members":{ + "address":{ + "shape":"ChainAddress", + "documentation":"The contract or wallet address.
" + } + }, + "documentation":"The container for the owner information to filter by.
" + }, + "OwnerIdentifier":{ + "type":"structure", + "required":["address"], + "members":{ + "address":{ + "shape":"ChainAddress", + "documentation":"The contract or wallet address for the owner.
" + } + }, + "documentation":"The container for the identifier of the owner.
" + }, + "QueryNetwork":{ + "type":"string", + "enum":[ + "ETHEREUM_MAINNET", + "BITCOIN_MAINNET" + ] + }, + "QueryTokenId":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{1,66}" + }, + "QueryTransactionEventType":{ + "type":"string", + "enum":[ + "ERC20_TRANSFER", + "ERC20_MINT", + "ERC20_BURN", + "ERC20_DEPOSIT", + "ERC20_WITHDRAWAL", + "ERC721_TRANSFER", + "ERC1155_TRANSFER", + "BITCOIN_VIN", + "BITCOIN_VOUT", + "INTERNAL_ETH_TRANSFER", + "ETH_TRANSFER" + ] + }, + "QueryTransactionHash":{ + "type":"string", + "pattern":"(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" + }, + "QueryTransactionStatus":{ + "type":"string", + "enum":[ + "FINAL", + "FAILED" + ] + }, + "QuotaCode":{"type":"string"}, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"The container for the exception message.
" + }, + "resourceId":{ + "shape":"ResourceId", + "documentation":"The resourceId of the resource that caused the exception.
The resourceType of the resource that caused the exception.
The resource was not found.
", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":["collection"] + }, + "ServiceCode":{"type":"string"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"The container for the exception message.
" + }, + "resourceId":{ + "shape":"ResourceId", + "documentation":"The resourceId of the resource that caused the exception.
The resourceType of the resource that caused the exception.
The container for the serviceCode.
The container for the quotaCode.
The service quota has been exceeded for this resource.
", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "required":[ + "message", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"The container for the exception message.
" + }, + "serviceCode":{ + "shape":"ServiceCode", + "documentation":"The container for the serviceCode.
The container for the quotaCode.
The container of the retryAfterSeconds value.
The request or operation couldn't be performed because a service is throttling requests. The most common source of throttling errors is when you create resources that exceed your service limit for this resource type. Request a limit increase or delete unused resources, if possible.
", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "TokenBalance":{ + "type":"structure", + "required":[ + "balance", + "atBlockchainInstant" + ], + "members":{ + "ownerIdentifier":{ + "shape":"OwnerIdentifier", + "documentation":"The container for the identifier of the owner.
" + }, + "tokenIdentifier":{ + "shape":"TokenIdentifier", + "documentation":"The identifier for the token, including the unique token ID and its blockchain network.
" + }, + "balance":{ + "shape":"String", + "documentation":"The container of the token balance.
" + }, + "atBlockchainInstant":{ + "shape":"BlockchainInstant", + "documentation":"The time for when the TokenBalance is requested or the current time if a time is not provided in the request.
This time will only be recorded up to the second.
The timestamp of the last transaction at which the balance for the token in the wallet was updated.
The balance of the token.
" + }, + "TokenBalanceList":{ + "type":"list", + "member":{"shape":"TokenBalance"}, + "max":250, + "min":0 + }, + "TokenFilter":{ + "type":"structure", + "required":["network"], + "members":{ + "network":{ + "shape":"QueryNetwork", + "documentation":"The blockchain network of the token.
" + }, + "contractAddress":{ + "shape":"ChainAddress", + "documentation":"This is the address of the contract.
" + }, + "tokenId":{ + "shape":"QueryTokenId", + "documentation":"The unique identifier of the token.
" + } + }, + "documentation":"The container of the token filter like the contract address on a given blockchain network or a unique token identifier on a given blockchain network.
You must always specify the network property of this container when using this operation.
The blockchain network of the token.
" + }, + "contractAddress":{ + "shape":"ChainAddress", + "documentation":"This is the token's contract address.
" + }, + "tokenId":{ + "shape":"QueryTokenId", + "documentation":"The unique identifier of the token.
" + } + }, + "documentation":"The container for the identifier for the token including the unique token ID and its blockchain network.
Only the native tokens BTC,ETH, and the ERC-20, ERC-721, and ERC 1155 token standards are supported.
The blockchain network where the transaction occured.
" + }, + "blockHash":{ + "shape":"BlockHash", + "documentation":"The block hash is a unique identifier for a block. It is a fixed-size string that is calculated by using the information in the block. The block hash is used to verify the integrity of the data in the block.
" + }, + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + }, + "blockNumber":{ + "shape":"String", + "documentation":"The block number in which the transaction is recorded.
" + }, + "transactionTimestamp":{ + "shape":"Timestamp", + "documentation":"The Timestamp of the transaction.
The index of the transaction within a blockchain.
" + }, + "numberOfTransactions":{ + "shape":"Long", + "documentation":"The number of transactions in the block.
" + }, + "status":{ + "shape":"QueryTransactionStatus", + "documentation":"The status of the transaction.
" + }, + "to":{ + "shape":"ChainAddress", + "documentation":"The identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + }, + "from":{ + "shape":"ChainAddress", + "documentation":"The initiator of the transaction. It is either in the form a public key or a contract address.
" + }, + "contractAddress":{ + "shape":"ChainAddress", + "documentation":"The blockchain address for the contract.
" + }, + "gasUsed":{ + "shape":"String", + "documentation":"The amount of gas used for the transaction.
" + }, + "cumulativeGasUsed":{ + "shape":"String", + "documentation":"The amount of gas used up to the specified point in the block.
" + }, + "effectiveGasPrice":{ + "shape":"String", + "documentation":"The effective gas price.
" + }, + "signatureV":{ + "shape":"Integer", + "documentation":"The signature of the transaction. The Z coordinate of a point V.
" + }, + "signatureR":{ + "shape":"String", + "documentation":"The signature of the transaction. The X coordinate of a point R.
" + }, + "signatureS":{ + "shape":"String", + "documentation":"The signature of the transaction. The Y coordinate of a point S.
" + }, + "transactionFee":{ + "shape":"String", + "documentation":"The transaction fee.
" + }, + "transactionId":{ + "shape":"String", + "documentation":"The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + } + }, + "documentation":"There are two possible types of transactions used for this data type:
A Bitcoin transaction is a movement of BTC from one address to another.
An Ethereum transaction refers to an action initiated by an externally owned account, which is an account managed by a human, not a contract. For example, if Bob sends Alice 1 ETH, Bob's account must be debited and Alice's must be credited. This state-changing action occurs within a transaction.
The blockchain network where the transaction occurred.
" + }, + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + }, + "eventType":{ + "shape":"QueryTransactionEventType", + "documentation":"The type of transaction event.
" + }, + "from":{ + "shape":"ChainAddress", + "documentation":"The wallet address initiating the transaction. It can either be a public key or a contract.
" + }, + "to":{ + "shape":"ChainAddress", + "documentation":"The wallet address receiving the transaction. It can either be a public key or a contract.
" + }, + "value":{ + "shape":"String", + "documentation":"The value that was transacted.
" + }, + "contractAddress":{ + "shape":"ChainAddress", + "documentation":"The blockchain address. for the contract
" + }, + "tokenId":{ + "shape":"QueryTokenId", + "documentation":"The unique identifier for the token involved in the transaction.
" + }, + "transactionId":{ + "shape":"String", + "documentation":"The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + }, + "voutIndex":{ + "shape":"Integer", + "documentation":"The position of the vout in the transaction output list.
" + } + }, + "documentation":"The container for the properties of a transaction event.
" + }, + "TransactionEventList":{ + "type":"list", + "member":{"shape":"TransactionEvent"}, + "max":250, + "min":0 + }, + "TransactionOutputItem":{ + "type":"structure", + "required":[ + "transactionHash", + "network", + "transactionTimestamp" + ], + "members":{ + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.
" + }, + "network":{ + "shape":"QueryNetwork", + "documentation":"The blockchain network where the transaction occurred.
" + }, + "transactionTimestamp":{ + "shape":"Timestamp", + "documentation":"The time when the transaction occurred.
" + } + }, + "documentation":"The container of the transaction output.
" + }, + "TransactionOutputList":{ + "type":"list", + "member":{"shape":"TransactionOutputItem"}, + "max":250, + "min":0 + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"The container for the exception message.
" + }, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"The container for the reason for the exception
" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"The container for the fieldList of the exception.
The resource passed is invalid.
", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"The name of the field that triggered the ValidationException.
The ValidationException message.
The resource passed is invalid.
" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + } + }, + "documentation":"Amazon Managed Blockchain (AMB) Query provides you with convenient access to multi-blockchain network data, which makes it easier for you to extract contextual data related to blockchain activity. You can use AMB Query to read data from public blockchain networks, such as Bitcoin Mainnet and Ethereum Mainnet. You can also get information such as the current and historical balances of addresses, or you can get a list of blockchain transactions for a given time period. Additionally, you can get details of a given transaction, such as transaction events, which you can further analyze or use in business logic for your applications.
" +} diff --git a/services/managedblockchainquery/src/main/resources/codegen-resources/waiters-2.json b/services/managedblockchainquery/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/managedblockchainquery/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} From 79449ce4ffcec9c845681c2c4eee8b30ca5bd3d1 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:24:49 +0000 Subject: [PATCH 039/270] AWS Elemental MediaConvert Update: This release includes general updates to user documentation. --- ...ture-AWSElementalMediaConvert-7f90871.json | 6 + .../codegen-resources/service-2.json | 1346 ++++++++--------- 2 files changed, 679 insertions(+), 673 deletions(-) create mode 100644 .changes/next-release/feature-AWSElementalMediaConvert-7f90871.json diff --git a/.changes/next-release/feature-AWSElementalMediaConvert-7f90871.json b/.changes/next-release/feature-AWSElementalMediaConvert-7f90871.json new file mode 100644 index 000000000000..c9ad3ec656f9 --- /dev/null +++ b/.changes/next-release/feature-AWSElementalMediaConvert-7f90871.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes general updates to user documentation." +} diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index b8433f9290e5..c6ee5fd641da 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -1217,7 +1217,7 @@ "Bitrate": { "shape": "__integerMin6000Max1024000", "locationName": "bitrate", - "documentation": "Specify the average bitrate in bits per second. 
The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. The value you set is also constrained by the values that you choose for Profile (codecProfile), Bitrate control mode (codingMode), and Sample rate (sampleRate). Default values depend on Bitrate control mode and Profile." + "documentation": "Specify the average bitrate in bits per second. The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. The value you set is also constrained by the values that you choose for Profile, Bitrate control mode, and Sample rate. Default values depend on Bitrate control mode and Profile." }, "CodecProfile": { "shape": "AacCodecProfile", @@ -1255,7 +1255,7 @@ "documentation": "VBR Quality Level - Only used if rate_control_mode is VBR." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode (rateControlMode) to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). Defaults and valid values depend on the rate control mode." + "documentation": "Required when you set Codec to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode to \"VBR\" or \"CBR\". 
In VBR mode, you control the audio quality with the setting VBR quality. In CBR mode, you use the setting Bitrate. Defaults and valid values depend on the rate control mode." }, "AacSpecification": { "type": "string", @@ -1301,7 +1301,7 @@ }, "Ac3DynamicRangeCompressionLine": { "type": "string", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "FILM_STANDARD", "FILM_LIGHT", @@ -1313,7 +1313,7 @@ }, "Ac3DynamicRangeCompressionProfile": { "type": "string", - "documentation": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile (DynamicRangeCompressionProfile). The mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine) and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf). 
Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None (NONE) to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD) to set the profile to Dolby's film standard profile for all operating modes.", + "documentation": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile. The mode-specific settings are Dynamic range compression profile, line mode and Dynamic range compression profile, RF mode. Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None to leave out DRC signaling. Keep the default Film standard to set the profile to Dolby's film standard profile for all operating modes.", "enum": [ "FILM_STANDARD", "NONE" @@ -1321,7 +1321,7 @@ }, "Ac3DynamicRangeCompressionRf": { "type": "string", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. 
Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "FILM_STANDARD", "FILM_LIGHT", @@ -1373,17 +1373,17 @@ "DynamicRangeCompressionLine": { "shape": "Ac3DynamicRangeCompressionLine", "locationName": "dynamicRangeCompressionLine", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." 
}, "DynamicRangeCompressionProfile": { "shape": "Ac3DynamicRangeCompressionProfile", "locationName": "dynamicRangeCompressionProfile", - "documentation": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile (DynamicRangeCompressionProfile). The mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine) and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf). Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None (NONE) to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD) to set the profile to Dolby's film standard profile for all operating modes." + "documentation": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile. The mode-specific settings are Dynamic range compression profile, line mode and Dynamic range compression profile, RF mode. Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None to leave out DRC signaling. Keep the default Film standard to set the profile to Dolby's film standard profile for all operating modes." }, "DynamicRangeCompressionRf": { "shape": "Ac3DynamicRangeCompressionRf", "locationName": "dynamicRangeCompressionRf", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. 
Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "LfeFilter": { "shape": "Ac3LfeFilter", @@ -1401,7 +1401,7 @@ "documentation": "This value is always 48000. It represents the sample rate in Hz." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AC3." + "documentation": "Required when you set Codec to the value AC3." }, "AccelerationMode": { "type": "string", @@ -1479,7 +1479,7 @@ }, "AfdSignaling": { "type": "string", - "documentation": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data.", + "documentation": "This setting only applies to H.264, H.265, and MPEG2 outputs. 
Use Insert AFD signaling to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data.", "enum": [ "NONE", "AUTO", @@ -1492,7 +1492,7 @@ "BitDepth": { "shape": "__integerMin16Max24", "locationName": "bitDepth", - "documentation": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." + "documentation": "Specify Bit depth, in bits per sample, to choose the encoding quality for this audio track." }, "Channels": { "shape": "__integerMin1Max64", @@ -1505,7 +1505,7 @@ "documentation": "Sample rate in hz." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AIFF." + "documentation": "Required when you set Codec to the value AIFF." }, "AllowedRenditionSize": { "type": "structure", @@ -1538,7 +1538,7 @@ }, "AncillaryConvert608To708": { "type": "string", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. 
If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "enum": [ "UPCONVERT", "DISABLED" @@ -1550,7 +1550,7 @@ "Convert608To708": { "shape": "AncillaryConvert608To708", "locationName": "convert608To708", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." }, "SourceAncillaryChannelNumber": { "shape": "__integerMin1Max4", @@ -1633,7 +1633,7 @@ }, "AudioCodec": { "type": "string", - "documentation": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. 
For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output", + "documentation": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output", "enum": [ "AAC", "MP2", @@ -1654,37 +1654,37 @@ "AacSettings": { "shape": "AacSettings", "locationName": "aacSettings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode (rateControlMode) to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). Defaults and valid values depend on the rate control mode." + "documentation": "Required when you set Codec to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality. 
In CBR mode, you use the setting Bitrate. Defaults and valid values depend on the rate control mode." }, "Ac3Settings": { "shape": "Ac3Settings", "locationName": "ac3Settings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AC3." + "documentation": "Required when you set Codec to the value AC3." }, "AiffSettings": { "shape": "AiffSettings", "locationName": "aiffSettings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AIFF." + "documentation": "Required when you set Codec to the value AIFF." }, "Codec": { "shape": "AudioCodec", "locationName": "codec", - "documentation": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output" + "documentation": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. 
For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output" }, "Eac3AtmosSettings": { "shape": "Eac3AtmosSettings", "locationName": "eac3AtmosSettings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3_ATMOS." + "documentation": "Required when you set Codec to the value EAC3_ATMOS." }, "Eac3Settings": { "shape": "Eac3Settings", "locationName": "eac3Settings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3." + "documentation": "Required when you set Codec to the value EAC3." }, "Mp2Settings": { "shape": "Mp2Settings", "locationName": "mp2Settings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value MP2." + "documentation": "Required when you set Codec to the value MP2." }, "Mp3Settings": { "shape": "Mp3Settings", @@ -1704,7 +1704,7 @@ "WavSettings": { "shape": "WavSettings", "locationName": "wavSettings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value WAV." + "documentation": "Required when you set Codec to the value WAV." } }, "documentation": "Settings related to audio encoding. The settings in this group vary depending on the value that you choose for your audio codec." @@ -1753,7 +1753,7 @@ "CustomLanguageCode": { "shape": "__stringPatternAZaZ23AZaZ", "locationName": "customLanguageCode", - "documentation": "Specify the language for this audio output track. The service puts this language code into your output audio track when you set Language code control (AudioLanguageCodeControl) to Use configured (USE_CONFIGURED). 
The service also uses your specified custom language code when you set Language code control (AudioLanguageCodeControl) to Follow input (FOLLOW_INPUT), but your input file doesn't specify a language code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming." + "documentation": "Specify the language for this audio output track. The service puts this language code into your output audio track when you set Language code control to Use configured. The service also uses your specified custom language code when you set Language code control to Follow input, but your input file doesn't specify a language code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming." }, "LanguageCode": { "shape": "LanguageCode", @@ -1763,7 +1763,7 @@ "LanguageCodeControl": { "shape": "AudioLanguageCodeControl", "locationName": "languageCodeControl", - "documentation": "Specify which source for language code takes precedence for this audio track. When you choose Follow input (FOLLOW_INPUT), the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code (languageCode or customLanguageCode). When you choose Use configured (USE_CONFIGURED), the service uses the language code that you specify." + "documentation": "Specify which source for language code takes precedence for this audio track. When you choose Follow input, the service uses the language code from the input track if it's present. 
If there's no languge code on the input track, the service uses the code that you specify in the setting Language code. When you choose Use configured, the service uses the language code that you specify." }, "RemixSettings": { "shape": "RemixSettings", @@ -1790,7 +1790,7 @@ }, "AudioLanguageCodeControl": { "type": "string", - "documentation": "Specify which source for language code takes precedence for this audio track. When you choose Follow input (FOLLOW_INPUT), the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code (languageCode or customLanguageCode). When you choose Use configured (USE_CONFIGURED), the service uses the language code that you specify.", + "documentation": "Specify which source for language code takes precedence for this audio track. When you choose Follow input, the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code. When you choose Use configured, the service uses the language code that you specify.", "enum": [ "FOLLOW_INPUT", "USE_CONFIGURED" @@ -1861,7 +1861,7 @@ "TargetLkfs": { "shape": "__doubleMinNegative59Max0", "locationName": "targetLkfs", - "documentation": "When you use Audio normalization (AudioNormalizationSettings), optionally use this setting to specify a target loudness. If you don't specify a value here, the encoder chooses a value for you, based on the algorithm that you choose for Algorithm (algorithm). If you choose algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS." + "documentation": "When you use Audio normalization, optionally use this setting to specify a target loudness. If you don't specify a value here, the encoder chooses a value for you, based on the algorithm that you choose for Algorithm. 
If you choose algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS." }, "TruePeakLimiterThreshold": { "shape": "__doubleMinNegative8Max0", @@ -1917,7 +1917,7 @@ "ProgramSelection": { "shape": "__integerMin0Max8", "locationName": "programSelection", - "documentation": "Use this setting for input streams that contain Dolby E, to have the service extract specific program data from the track. To select multiple programs, create multiple selectors with the same Track and different Program numbers. In the console, this setting is visible when you set Selector type to Track. Choose the program number from the dropdown list. If you are sending a JSON file, provide the program ID, which is part of the audio metadata. If your input file has incorrect metadata, you can choose All channels instead of a program number to have the service ignore the program IDs and include all the programs in the track." + "documentation": "Use this setting for input streams that contain Dolby E, to have the service extract specific program data from the track. To select multiple programs, create multiple selectors with the same Track and different Program numbers. In the console, this setting is visible when you set Selector type to Track. Choose the program number from the dropdown list. If your input file has incorrect metadata, you can choose All channels instead of a program number to have the service ignore the program IDs and include all the programs in the track." }, "RemixSettings": { "shape": "RemixSettings", @@ -1932,10 +1932,10 @@ "Tracks": { "shape": "__listOf__integerMin1Max2147483647", "locationName": "tracks", - "documentation": "Identify a track from the input audio to include in this selector by entering the track index number. To include several tracks in a single audio selector, specify multiple tracks as follows. Using the console, enter a comma-separated list. For examle, type \"1,2,3\" to include tracks 1 through 3. 
Specifying directly in your JSON job file, provide the track numbers in an array. For example, \"tracks\": [1,2,3]." + "documentation": "Identify a track from the input audio to include in this selector by entering the track index number. To include several tracks in a single audio selector, specify multiple tracks as follows. Using the console, enter a comma-separated list. For example, type \"1,2,3\" to include tracks 1 through 3." } }, - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." + "documentation": "Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." }, "AudioSelectorGroup": { "type": "structure", @@ -1946,7 +1946,7 @@ "documentation": "Name of an Audio Selector within the same input to include in the group. Audio selector names are standardized, based on their order within the input (e.g., \"Audio Selector 1\"). The audio selector name parameter can be repeated to add any number of audio selectors to the group." } }, - "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab (AudioDescription). Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." + "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." 
}, "AudioSelectorType": { "type": "string", @@ -2036,7 +2036,7 @@ }, "Av1AdaptiveQuantization": { "type": "string", - "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization).", + "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization.", "enum": [ "OFF", "LOW", @@ -2048,7 +2048,7 @@ }, "Av1BitDepth": { "type": "string", - "documentation": "Specify the Bit depth (Av1BitDepth). You can choose 8-bit (BIT_8) or 10-bit (BIT_10).", + "documentation": "Specify the Bit depth. You can choose 8-bit or 10-bit.", "enum": [ "BIT_8", "BIT_10" @@ -2056,7 +2056,7 @@ }, "Av1FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. 
The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -2077,7 +2077,7 @@ "QvbrQualityLevel": { "shape": "__integerMin1Max10", "locationName": "qvbrQualityLevel", - "documentation": "Use this setting only when you set Rate control mode (RateControlMode) to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." + "documentation": "Use this setting only when you set Rate control mode to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. 
For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." }, "QvbrQualityLevelFineTune": { "shape": "__doubleMin0Max1", @@ -2085,7 +2085,7 @@ "documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33." } }, - "documentation": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Use these settings only when you set QVBR for Rate control mode." }, "Av1RateControlMode": { "type": "string", @@ -2100,17 +2100,17 @@ "AdaptiveQuantization": { "shape": "Av1AdaptiveQuantization", "locationName": "adaptiveQuantization", - "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization)." + "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization." }, "BitDepth": { "shape": "Av1BitDepth", "locationName": "bitDepth", - "documentation": "Specify the Bit depth (Av1BitDepth). You can choose 8-bit (BIT_8) or 10-bit (BIT_10)." + "documentation": "Specify the Bit depth. You can choose 8-bit or 10-bit." 
}, "FramerateControl": { "shape": "Av1FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "Av1FramerateConversionAlgorithm", @@ -2145,7 +2145,7 @@ "QvbrSettings": { "shape": "Av1QvbrSettings", "locationName": "qvbrSettings", - "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." 
}, "RateControlMode": { "shape": "Av1RateControlMode", @@ -2160,14 +2160,14 @@ "SpatialAdaptiveQuantization": { "shape": "Av1SpatialAdaptiveQuantization", "locationName": "spatialAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. 
If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." } }, "documentation": "Required when you set Codec, under VideoDescription>CodecSettings to the value AV1." }, "Av1SpatialAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. 
When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", "enum": [ "DISABLED", "ENABLED" @@ -2196,7 +2196,7 @@ }, "AvcIntraFramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. 
Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -2213,7 +2213,7 @@ }, "AvcIntraInterlaceMode": { "type": "string", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. 
Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -2224,7 +2224,7 @@ }, "AvcIntraScanTypeConversionMode": { "type": "string", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. 
Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "enum": [ "INTERLACED", "INTERLACED_OPTIMIZE" @@ -2241,12 +2241,12 @@ "AvcIntraUhdSettings": { "shape": "AvcIntraUhdSettings", "locationName": "avcIntraUhdSettings", - "documentation": "Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object isn't allowed." + "documentation": "Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra class to a different value, this object isn't allowed." }, "FramerateControl": { "shape": "AvcIntraFramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. 
Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "AvcIntraFramerateConversionAlgorithm", @@ -2266,29 +2266,29 @@ "InterlaceMode": { "shape": "AvcIntraInterlaceMode", "locationName": "interlaceMode", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. 
Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." }, "ScanTypeConversionMode": { "shape": "AvcIntraScanTypeConversionMode", "locationName": "scanTypeConversionMode", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. 
Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." }, "SlowPal": { "shape": "AvcIntraSlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." }, "Telecine": { "shape": "AvcIntraTelecine", "locationName": "telecine", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. 
When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." } }, "documentation": "Required when you choose AVC-Intra for your output video codec. For more information about the AVC-Intra settings, see the relevant specification. For detailed information about SD and HD in AVC-Intra, see https://ieeexplore.ieee.org/document/7290936. For information about 4K/2K in AVC-Intra, see https://pro-av.panasonic.net/en/avc-ultra/AVC-ULTRAoverview.pdf." }, "AvcIntraSlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. 
Required settings: You must also set Framerate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -2296,7 +2296,7 @@ }, "AvcIntraTelecine": { "type": "string", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "enum": [ "NONE", "HARD" @@ -2304,7 +2304,7 @@ }, "AvcIntraUhdQualityTuningLevel": { "type": "string", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass (MULTI_PASS), your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS).", + "documentation": "Optional. Use Quality tuning level to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass, your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass, your encoding time is faster. 
The default behavior is Single-pass.", "enum": [ "SINGLE_PASS", "MULTI_PASS" @@ -2316,10 +2316,10 @@ "QualityTuningLevel": { "shape": "AvcIntraUhdQualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass (MULTI_PASS), your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS)." + "documentation": "Optional. Use Quality tuning level to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass, your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass, your encoding time is faster. The default behavior is Single-pass." } }, - "documentation": "Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object isn't allowed." + "documentation": "Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra class to a different value, this object isn't allowed." }, "BadRequestException": { "type": "structure", @@ -2384,7 +2384,7 @@ }, "BurnInSubtitleStylePassthrough": { "type": "string", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. 
Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", "enum": [ "ENABLED", "DISABLED" @@ -2401,27 +2401,27 @@ "ApplyFontColor": { "shape": "BurninSubtitleApplyFontColor", "locationName": "applyFontColor", - "documentation": "Ignore this setting unless Style passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." + "documentation": "Ignore this setting unless Style passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. 
When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." }, "BackgroundColor": { "shape": "BurninSubtitleBackgroundColor", "locationName": "backgroundColor", - "documentation": "Specify the color of the rectangle behind the captions. Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present." + "documentation": "Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present." }, "BackgroundOpacity": { "shape": "__integerMin0Max255", "locationName": "backgroundOpacity", - "documentation": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions." + "documentation": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions." 
}, "FallbackFont": { "shape": "BurninSubtitleFallbackFont", "locationName": "fallbackFont", - "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." }, "FontColor": { "shape": "BurninSubtitleFontColor", "locationName": "fontColor", - "documentation": "Specify the color of the burned-in captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present." + "documentation": "Specify the color of the burned-in captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present." 
}, "FontOpacity": { "shape": "__integerMin0Max255", @@ -2431,17 +2431,17 @@ "FontResolution": { "shape": "__integerMin96Max600", "locationName": "fontResolution", - "documentation": "Specify the Font resolution (FontResolution) in DPI (dots per inch)." + "documentation": "Specify the Font resolution in DPI (dots per inch)." }, "FontScript": { "shape": "FontScript", "locationName": "fontScript", - "documentation": "Set Font script (FontScript) to Automatically determined (AUTOMATIC), or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese." + "documentation": "Set Font script to Automatically determined, or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese." }, "FontSize": { "shape": "__integerMin0Max96", "locationName": "fontSize", - "documentation": "Specify the Font size (FontSize) in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size." + "documentation": "Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size." }, "HexFontColor": { "shape": "__stringMin6Max8Pattern09aFAF609aFAF2", @@ -2451,22 +2451,22 @@ "OutlineColor": { "shape": "BurninSubtitleOutlineColor", "locationName": "outlineColor", - "documentation": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present." + "documentation": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present." 
}, "OutlineSize": { "shape": "__integerMin0Max10", "locationName": "outlineSize", - "documentation": "Specify the Outline size (OutlineSize) of the caption text, in pixels. Leave Outline size blank and set Style passthrough (StylePassthrough) to enabled to use the outline size data from your input captions, if present." + "documentation": "Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present." }, "ShadowColor": { "shape": "BurninSubtitleShadowColor", "locationName": "shadowColor", - "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present." + "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present." }, "ShadowOpacity": { "shape": "__integerMin0Max255", "locationName": "shadowOpacity", - "documentation": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions." + "documentation": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. 
If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions." }, "ShadowXOffset": { "shape": "__integerMinNegative2147483648Max2147483647", @@ -2476,30 +2476,30 @@ "ShadowYOffset": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "shadowYOffset", - "documentation": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset (ShadowYOffset) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow y-offset data from your input captions, if present." + "documentation": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset blank and set Style passthrough to enabled to use the shadow y-offset data from your input captions, if present." }, "StylePassthrough": { "shape": "BurnInSubtitleStylePassthrough", "locationName": "stylePassthrough", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. 
Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." }, "TeletextSpacing": { "shape": "BurninSubtitleTeletextSpacing", "locationName": "teletextSpacing", - "documentation": "Specify whether the text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions." + "documentation": "Specify whether the text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions." }, "XPosition": { "shape": "__integerMin0Max2147483647", "locationName": "xPosition", - "documentation": "Specify the horizontal position (XPosition) of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter." + "documentation": "Specify the horizontal position of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter." 
}, "YPosition": { "shape": "__integerMin0Max2147483647", "locationName": "yPosition", - "documentation": "Specify the vertical position (YPosition) of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output." + "documentation": "Specify the vertical position of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output." } }, - "documentation": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to BURN_IN." + "documentation": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html." }, "BurninSubtitleAlignment": { "type": "string", @@ -2512,7 +2512,7 @@ }, "BurninSubtitleApplyFontColor": { "type": "string", - "documentation": "Ignore this setting unless Style passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. 
When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", + "documentation": "Ignore this setting unless Style passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", "enum": [ "WHITE_TEXT_ONLY", "ALL_TEXT" @@ -2520,7 +2520,7 @@ }, "BurninSubtitleBackgroundColor": { "type": "string", - "documentation": "Specify the color of the rectangle behind the captions. Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present.", + "documentation": "Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present.", "enum": [ "NONE", "BLACK", @@ -2530,7 +2530,7 @@ }, "BurninSubtitleFallbackFont": { "type": "string", - "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. 
When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", "enum": [ "BEST_MATCH", "MONOSPACED_SANSSERIF", @@ -2541,7 +2541,7 @@ }, "BurninSubtitleFontColor": { "type": "string", - "documentation": "Specify the color of the burned-in captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present.", + "documentation": "Specify the color of the burned-in captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present.", "enum": [ "WHITE", "BLACK", @@ -2555,7 +2555,7 @@ }, "BurninSubtitleOutlineColor": { "type": "string", - "documentation": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present.", + "documentation": "Specify font outline color. 
Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present.", "enum": [ "BLACK", "WHITE", @@ -2568,7 +2568,7 @@ }, "BurninSubtitleShadowColor": { "type": "string", - "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present.", + "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present.", "enum": [ "NONE", "BLACK", @@ -2578,7 +2578,7 @@ }, "BurninSubtitleTeletextSpacing": { "type": "string", - "documentation": "Specify whether the text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions.", + "documentation": "Specify whether the text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions.", "enum": [ "FIXED_GRID", "PROPORTIONAL", @@ -2620,7 +2620,7 @@ "DestinationSettings": { "shape": "CaptionDestinationSettings", "locationName": "destinationSettings", - "documentation": "Settings related to one captions tab on the MediaConvert console. In your job JSON, an instance of captions DestinationSettings is equivalent to one captions tab in the console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." + "documentation": "Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." }, "LanguageCode": { "shape": "LanguageCode", @@ -2646,7 +2646,7 @@ "DestinationSettings": { "shape": "CaptionDestinationSettings", "locationName": "destinationSettings", - "documentation": "Settings related to one captions tab on the MediaConvert console. In your job JSON, an instance of captions DestinationSettings is equivalent to one captions tab in the console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." + "documentation": "Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." }, "LanguageCode": { "shape": "LanguageCode", @@ -2667,59 +2667,59 @@ "BurninDestinationSettings": { "shape": "BurninDestinationSettings", "locationName": "burninDestinationSettings", - "documentation": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to BURN_IN." + "documentation": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html." }, "DestinationType": { "shape": "CaptionDestinationType", "locationName": "destinationType", - "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20)." + "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20." 
}, "DvbSubDestinationSettings": { "shape": "DvbSubDestinationSettings", "locationName": "dvbSubDestinationSettings", - "documentation": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to DVB_SUB." + "documentation": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html." }, "EmbeddedDestinationSettings": { "shape": "EmbeddedDestinationSettings", "locationName": "embeddedDestinationSettings", - "documentation": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED." + "documentation": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html." }, "ImscDestinationSettings": { "shape": "ImscDestinationSettings", "locationName": "imscDestinationSettings", - "documentation": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to IMSC." + "documentation": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." }, "SccDestinationSettings": { "shape": "SccDestinationSettings", "locationName": "sccDestinationSettings", - "documentation": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SCC." + "documentation": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html." }, "SrtDestinationSettings": { "shape": "SrtDestinationSettings", "locationName": "srtDestinationSettings", - "documentation": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. 
When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SRT." + "documentation": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video." }, "TeletextDestinationSettings": { "shape": "TeletextDestinationSettings", "locationName": "teletextDestinationSettings", - "documentation": "Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TELETEXT." + "documentation": "Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html." }, "TtmlDestinationSettings": { "shape": "TtmlDestinationSettings", "locationName": "ttmlDestinationSettings", - "documentation": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TTML." + "documentation": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." }, "WebvttDestinationSettings": { "shape": "WebvttDestinationSettings", "locationName": "webvttDestinationSettings", - "documentation": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to WebVTT." + "documentation": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." } }, - "documentation": "Settings related to one captions tab on the MediaConvert console. In your job JSON, an instance of captions DestinationSettings is equivalent to one captions tab in the console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." + "documentation": "Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." 
}, "CaptionDestinationType": { "type": "string", - "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20).", + "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20.", "enum": [ "BURN_IN", "DVB_SUB", @@ -2770,15 +2770,15 @@ "FramerateDenominator": { "shape": "__integerMin1Max1001", "locationName": "framerateDenominator", - "documentation": "Specify the denominator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). Use this setting along with the setting Framerate numerator (framerateNumerator)." + "documentation": "Specify the denominator of the fraction that represents the frame rate for the setting Caption source frame rate. Use this setting along with the setting Framerate numerator." 
}, "FramerateNumerator": { "shape": "__integerMin1Max60000", "locationName": "framerateNumerator", - "documentation": "Specify the numerator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). Use this setting along with the setting Framerate denominator (framerateDenominator)." + "documentation": "Specify the numerator of the fraction that represents the frame rate for the setting Caption source frame rate. Use this setting along with the setting Framerate denominator." } }, - "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. When you work directly in your JSON job specification, use the settings framerateNumerator and framerateDenominator. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." + "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." }, "CaptionSourceSettings": { "type": "structure", @@ -2806,7 +2806,7 @@ "SourceType": { "shape": "CaptionSourceType", "locationName": "sourceType", - "documentation": "Use Source (SourceType) to identify the format of your input captions. The service cannot auto-detect caption format." + "documentation": "Use Source to identify the format of your input captions. The service cannot auto-detect caption format." 
}, "TeletextSourceSettings": { "shape": "TeletextSourceSettings", @@ -2828,7 +2828,7 @@ }, "CaptionSourceType": { "type": "string", - "documentation": "Use Source (SourceType) to identify the format of your input captions. The service cannot auto-detect caption format.", + "documentation": "Use Source to identify the format of your input captions. The service cannot auto-detect caption format.", "enum": [ "ANCILLARY", "DVB_SUB", @@ -2855,7 +2855,7 @@ "documentation": "In your JSON job specification, include one child of OutputChannels for each audio channel that you want in your output. Each child should contain one instance of InputChannels or InputChannelsFineTune." } }, - "documentation": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both." + "documentation": "Channel mapping contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. 
In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both." }, "ClipLimits": { "type": "structure", @@ -2901,7 +2901,7 @@ }, "CmafClientCache": { "type": "string", - "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header.", "enum": [ "DISABLED", "ENABLED" @@ -2926,7 +2926,7 @@ "EncryptionMethod": { "shape": "CmafEncryptionType", "locationName": "encryptionMethod", - "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR)." + "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample or AES_CTR." }, "InitializationVectorInManifest": { "shape": "CmafInitializationVectorInManifest", @@ -2953,7 +2953,7 @@ }, "CmafEncryptionType": { "type": "string", - "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR).", + "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. 
Choose AES-CBC subsample or AES_CTR.", "enum": [ "SAMPLE_AES", "AES_CTR" @@ -2975,7 +2975,7 @@ "ClientCache": { "shape": "CmafClientCache", "locationName": "clientCache", - "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header." }, "CodecSpecification": { "shape": "CmafCodecSpecification", @@ -2990,7 +2990,7 @@ "Destination": { "shape": "__stringPatternS3", "locationName": "destination", - "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + "documentation": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." }, "DestinationSettings": { "shape": "DestinationSettings", @@ -3005,12 +3005,12 @@ "FragmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "fragmentLength", - "documentation": "Specify the length, in whole seconds, of the mp4 fragments. When you don't specify a value, MediaConvert defaults to 2. Related setting: Use Fragment length control (FragmentLengthControl) to specify whether the encoder enforces this value strictly." 
+ "documentation": "Specify the length, in whole seconds, of the mp4 fragments. When you don't specify a value, MediaConvert defaults to 2. Related setting: Use Fragment length control to specify whether the encoder enforces this value strictly." }, "ImageBasedTrickPlay": { "shape": "CmafImageBasedTrickPlay", "locationName": "imageBasedTrickPlay", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest), MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest, MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. 
The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" }, "ImageBasedTrickPlaySettings": { "shape": "CmafImageBasedTrickPlaySettings", @@ -3045,12 +3045,12 @@ "MpdProfile": { "shape": "CmafMpdProfile", "locationName": "mpdProfile", - "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE)." + "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file." }, "PtsOffsetHandlingForBFrames": { "shape": "CmafPtsOffsetHandlingForBFrames", "locationName": "ptsOffsetHandlingForBFrames", - "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. 
For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." + "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." }, "SegmentControl": { "shape": "CmafSegmentControl", @@ -3060,12 +3060,12 @@ "SegmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", - "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (CmafSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." + "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." }, "SegmentLengthControl": { "shape": "CmafSegmentLengthControl", "locationName": "segmentLengthControl", - "documentation": "Specify how you want MediaConvert to determine the segment length. 
Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." }, "StreamInfResolution": { "shape": "CmafStreamInfResolution", @@ -3095,14 +3095,14 @@ "WriteSegmentTimelineInRepresentation": { "shape": "CmafWriteSegmentTimelineInRepresentation", "locationName": "writeSegmentTimelineInRepresentation", - "documentation": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." + "documentation": "When you enable Precise segment duration in DASH manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." } }, - "documentation": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. 
When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to CMAF_GROUP_SETTINGS." + "documentation": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "CmafImageBasedTrickPlay": { "type": "string", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest), MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest, MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. 
The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", "enum": [ "NONE", "THUMBNAIL", @@ -3196,7 +3196,7 @@ }, "CmafMpdProfile": { "type": "string", - "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE).", + "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file.", "enum": [ "MAIN_PROFILE", "ON_DEMAND_PROFILE" @@ -3204,7 +3204,7 @@ }, "CmafPtsOffsetHandlingForBFrames": { "type": "string", - "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. 
For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", + "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", "enum": [ "ZERO_BASED", "MATCH_INITIAL_PTS" @@ -3220,7 +3220,7 @@ }, "CmafSegmentLengthControl": { "type": "string", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "enum": [ "EXACT", "GOP_MULTIPLE" @@ -3268,7 +3268,7 @@ }, "CmafWriteSegmentTimelineInRepresentation": { "type": "string", - "documentation": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. 
The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", + "documentation": "When you enable Precise segment duration in DASH manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", "enum": [ "ENABLED", "DISABLED" @@ -3276,7 +3276,7 @@ }, "CmfcAudioDuration": { "type": "string", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. 
When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "enum": [ "DEFAULT_CODEC_DURATION", "MATCH_VIDEO_DURATION" @@ -3284,7 +3284,7 @@ }, "CmfcAudioTrackType": { "type": "string", - "documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. 
Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting.", + "documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting.", "enum": [ "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT", "ALTERNATE_AUDIO_AUTO_SELECT", @@ -3293,7 +3293,7 @@ }, "CmfcDescriptiveVideoServiceFlag": { "type": "string", - "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. 
When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", + "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", "enum": [ "DONT_FLAG", "FLAG" @@ -3301,7 +3301,7 @@ }, "CmfcIFrameOnlyManifest": { "type": "string", - "documentation": "Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE).", + "documentation": "Choose Include to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. 
When you don't need the I-frame only child manifest, keep the default value Exclude.", "enum": [ "INCLUDE", "EXCLUDE" @@ -3317,7 +3317,7 @@ }, "CmfcManifestMetadataSignaling": { "type": "string", - "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough.", + "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough.", "enum": [ "ENABLED", "DISABLED" @@ -3325,7 +3325,7 @@ }, "CmfcScte35Esam": { "type": "string", - "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", + "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. 
Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML.", "enum": [ "INSERT", "NONE" @@ -3333,7 +3333,7 @@ }, "CmfcScte35Source": { "type": "string", - "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output.", + "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output.", "enum": [ "PASSTHROUGH", "NONE" @@ -3345,32 +3345,32 @@ "AudioDuration": { "shape": "CmfcAudioDuration", "locationName": "audioDuration", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." 
+ "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, "AudioGroupId": { "shape": "__string", "locationName": "audioGroupId", - "documentation": "Specify the audio rendition group for this audio rendition. Specify up to one value for each audio output in your output group. This value appears in your HLS parent manifest in the EXT-X-MEDIA tag of TYPE=AUDIO, as the value for the GROUP-ID attribute. For example, if you specify \"audio_aac_1\" for Audio group ID, it appears in your manifest like this: #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID=\"audio_aac_1\". Related setting: To associate the rendition group that this audio track belongs to with a video rendition, include the same value that you provide here for that video output's setting Audio rendition sets (audioRenditionSets)." + "documentation": "Specify the audio rendition group for this audio rendition. Specify up to one value for each audio output in your output group. This value appears in your HLS parent manifest in the EXT-X-MEDIA tag of TYPE=AUDIO, as the value for the GROUP-ID attribute. 
For example, if you specify \"audio_aac_1\" for Audio group ID, it appears in your manifest like this: #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID=\"audio_aac_1\". Related setting: To associate the rendition group that this audio track belongs to with a video rendition, include the same value that you provide here for that video output's setting Audio rendition sets." }, "AudioRenditionSets": { "shape": "__string", "locationName": "audioRenditionSets", - "documentation": "List the audio rendition groups that you want included with this video rendition. Use a comma-separated list. For example, say you want to include the audio rendition groups that have the audio group IDs \"audio_aac_1\" and \"audio_dolby\". Then you would specify this value: \"audio_aac_1,audio_dolby\". Related setting: The rendition groups that you include in your comma-separated list should all match values that you specify in the setting Audio group ID (AudioGroupId) for audio renditions in the same output group as this video rendition. Default behavior: If you don't specify anything here and for Audio group ID, MediaConvert puts each audio variant in its own audio rendition group and associates it with every video variant. Each value in your list appears in your HLS parent manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute. To continue the previous example, say that the file name for the child manifest for your video rendition is \"amazing_video_1.m3u8\". Then, in your parent manifest, each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO=\"audio_aac_1\"... amazing_video_1.m3u8 #EXT-X-STREAM-INF:AUDIO=\"audio_dolby\"... amazing_video_1.m3u8" + "documentation": "List the audio rendition groups that you want included with this video rendition. Use a comma-separated list. For example, say you want to include the audio rendition groups that have the audio group IDs \"audio_aac_1\" and \"audio_dolby\". 
Then you would specify this value: \"audio_aac_1,audio_dolby\". Related setting: The rendition groups that you include in your comma-separated list should all match values that you specify in the setting Audio group ID for audio renditions in the same output group as this video rendition. Default behavior: If you don't specify anything here and for Audio group ID, MediaConvert puts each audio variant in its own audio rendition group and associates it with every video variant. Each value in your list appears in your HLS parent manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute. To continue the previous example, say that the file name for the child manifest for your video rendition is \"amazing_video_1.m3u8\". Then, in your parent manifest, each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO=\"audio_aac_1\"... amazing_video_1.m3u8 #EXT-X-STREAM-INF:AUDIO=\"audio_dolby\"... amazing_video_1.m3u8" }, "AudioTrackType": { "shape": "CmfcAudioTrackType", "locationName": "audioTrackType", - "documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. 
When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting." + "documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting." }, "DescriptiveVideoServiceFlag": { "shape": "CmfcDescriptiveVideoServiceFlag", "locationName": "descriptiveVideoServiceFlag", - "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." 
+ "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." }, "IFrameOnlyManifest": { "shape": "CmfcIFrameOnlyManifest", "locationName": "iFrameOnlyManifest", - "documentation": "Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE)." + "documentation": "Choose Include to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude." }, "KlvMetadata": { "shape": "CmfcKlvMetadata", @@ -3380,44 +3380,44 @@ "ManifestMetadataSignaling": { "shape": "CmfcManifestMetadataSignaling", "locationName": "manifestMetadataSignaling", - "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. 
For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough." + "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough." }, "Scte35Esam": { "shape": "CmfcScte35Esam", "locationName": "scte35Esam", - "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." }, "Scte35Source": { "shape": "CmfcScte35Source", "locationName": "scte35Source", - "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. 
Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output." + "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output." }, "TimedMetadata": { "shape": "CmfcTimedMetadata", "locationName": "timedMetadata", - "documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank." + "documentation": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None or leave blank." }, "TimedMetadataBoxVersion": { "shape": "CmfcTimedMetadataBoxVersion", "locationName": "timedMetadataBoxVersion", - "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough." + "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough." 
}, "TimedMetadataSchemeIdUri": { "shape": "__stringMax1000", "locationName": "timedMetadataSchemeIdUri", - "documentation": "Specify the event message box (eMSG) scheme ID URI (scheme_id_uri) for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata (timedMetadata) to Passthrough." + "documentation": "Specify the event message box (eMSG) scheme ID URI for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata to Passthrough." }, "TimedMetadataValue": { "shape": "__stringMax1000", "locationName": "timedMetadataValue", - "documentation": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata (timedMetadata) to Passthrough." + "documentation": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata to Passthrough." } }, "documentation": "These settings relate to the fragmented MP4 container for the segments in your CMAF outputs." }, "CmfcTimedMetadata": { "type": "string", - "documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. 
To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank.", + "documentation": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None or leave blank.", "enum": [ "PASSTHROUGH", "NONE" @@ -3425,7 +3425,7 @@ }, "CmfcTimedMetadataBoxVersion": { "type": "string", - "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough.", + "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough.", "enum": [ "VERSION_0", "VERSION_1" @@ -3457,7 +3457,7 @@ "Hdr10Metadata": { "shape": "Hdr10Metadata", "locationName": "hdr10Metadata", - "documentation": "Use these settings when you convert to the HDR 10 color space. Specify the SMPTE ST 2086 Mastering Display Color Volume static metadata that you want signaled in the output. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the the content creator. When you set Color space conversion (ColorSpaceConversion) to HDR 10 (FORCE_HDR10), these settings are required. You must set values for Max frame average light level (maxFrameAverageLightLevel) and Max content light level (maxContentLightLevel); these settings don't have a default value. 
The default values for the other HDR 10 metadata settings are defined by the P3D65 color space. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr." + "documentation": "Use these settings when you convert to the HDR 10 color space. Specify the SMPTE ST 2086 Mastering Display Color Volume static metadata that you want signaled in the output. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the the content creator. When you set Color space conversion to HDR 10, these settings are required. You must set values for Max frame average light level and Max content light level; these settings don't have a default value. The default values for the other HDR 10 metadata settings are defined by the P3D65 color space. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr." }, "HdrToSdrToneMapper": { "shape": "HDRToSDRToneMapper", @@ -3489,7 +3489,7 @@ }, "ColorMetadata": { "type": "string", - "documentation": "Choose Insert (INSERT) for this setting to include color metadata in this output. Choose Ignore (IGNORE) to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default.", + "documentation": "Choose Insert for this setting to include color metadata in this output. Choose Ignore to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default.", "enum": [ "IGNORE", "INSERT" @@ -3525,7 +3525,7 @@ }, "ColorSpaceUsage": { "type": "string", - "documentation": "There are two sources for color metadata, the input file and the job input settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata). The Color space usage setting determines which takes precedence. 
Choose Force (FORCE) to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings.", + "documentation": "There are two sources for color metadata, the input file and the job input settings Color space and HDR master display information settings. The Color space usage setting determines which takes precedence. Choose Force to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. FALLBACK - Choose Fallback to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings.", "enum": [ "FORCE", "FALLBACK" @@ -3573,7 +3573,7 @@ "M2tsSettings": { "shape": "M2tsSettings", "locationName": "m2tsSettings", - "documentation": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." + "documentation": "MPEG-2 TS container settings. 
These apply to outputs in a File output group when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." }, "M3u8Settings": { "shape": "M3u8Settings", @@ -3896,7 +3896,7 @@ "PlaybackDeviceCompatibility": { "shape": "DashIsoPlaybackDeviceCompatibility", "locationName": "playbackDeviceCompatibility", - "documentation": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted." + "documentation": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted." 
}, "SpekeKeyProvider": { "shape": "SpekeKeyProvider", @@ -3908,7 +3908,7 @@ }, "DashIsoGroupAudioChannelConfigSchemeIdUri": { "type": "string", - "documentation": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.", + "documentation": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration, to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.", "enum": [ "MPEG_CHANNEL_CONFIGURATION", "DOLBY_CHANNEL_CONFIGURATION" @@ -3925,7 +3925,7 @@ "AudioChannelConfigSchemeIdUri": { "shape": "DashIsoGroupAudioChannelConfigSchemeIdUri", "locationName": "audioChannelConfigSchemeIdUri", - "documentation": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. 
For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011." + "documentation": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration, to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011." }, "BaseUrl": { "shape": "__string", @@ -3940,7 +3940,7 @@ "Destination": { "shape": "__stringPatternS3", "locationName": "destination", - "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + "documentation": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." 
}, "DestinationSettings": { "shape": "DestinationSettings", @@ -3965,7 +3965,7 @@ "ImageBasedTrickPlay": { "shape": "DashIsoImageBasedTrickPlay", "locationName": "imageBasedTrickPlay", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" }, "ImageBasedTrickPlaySettings": { "shape": "DashIsoImageBasedTrickPlaySettings", @@ -3990,12 +3990,12 @@ "MpdProfile": { "shape": "DashIsoMpdProfile", "locationName": "mpdProfile", - "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. 
When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE)." + "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file." }, "PtsOffsetHandlingForBFrames": { "shape": "DashIsoPtsOffsetHandlingForBFrames", "locationName": "ptsOffsetHandlingForBFrames", - "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." + "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. 
Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." }, "SegmentControl": { "shape": "DashIsoSegmentControl", @@ -4005,12 +4005,12 @@ "SegmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", - "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 30. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (DashIsoSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." + "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 30. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." }, "SegmentLengthControl": { "shape": "DashIsoSegmentLengthControl", "locationName": "segmentLengthControl", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. 
Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." }, "VideoCompositionOffsets": { "shape": "DashIsoVideoCompositionOffsets", @@ -4023,7 +4023,7 @@ "documentation": "If you get an HTTP error in the 400 range when you play back your DASH output, enable this setting and run your transcoding job again. When you enable this setting, the service writes precise segment durations in the DASH manifest. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When you don't enable this setting, the service writes approximate segment durations in your DASH manifest." } }, - "documentation": "Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to DASH_ISO_GROUP_SETTINGS." + "documentation": "Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "DashIsoHbbtvCompliance": { "type": "string", @@ -4035,7 +4035,7 @@ }, "DashIsoImageBasedTrickPlay": { "type": "string", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. 
The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", "enum": [ "NONE", "THUMBNAIL", @@ -4097,7 +4097,7 @@ }, "DashIsoMpdProfile": { "type": "string", - "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE).", + "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. 
When you choose On-demand, you must also set the output group setting Segment control to Single file.", "enum": [ "MAIN_PROFILE", "ON_DEMAND_PROFILE" @@ -4105,7 +4105,7 @@ }, "DashIsoPlaybackDeviceCompatibility": { "type": "string", - "documentation": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted.", + "documentation": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted.", "enum": [ "CENC_V1", "UNENCRYPTED_SEI" @@ -4113,7 +4113,7 @@ }, "DashIsoPtsOffsetHandlingForBFrames": { "type": "string", - "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. 
For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", + "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", "enum": [ "ZERO_BASED", "MATCH_INITIAL_PTS" @@ -4129,7 +4129,7 @@ }, "DashIsoSegmentLengthControl": { "type": "string", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "enum": [ "EXACT", "GOP_MULTIPLE" @@ -4145,7 +4145,7 @@ }, "DashIsoWriteSegmentTimelineInRepresentation": { "type": "string", - "documentation": "When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. 
The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", + "documentation": "When you enable Precise segment duration in manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", "enum": [ "ENABLED", "DISABLED" @@ -4196,7 +4196,7 @@ "Mode": { "shape": "DeinterlacerMode", "locationName": "mode", - "documentation": "Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. Default is Deinterlace. - Deinterlace converts interlaced to progressive. - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. - Adaptive auto-detects and converts to progressive." + "documentation": "Use Deinterlacer to choose how the service will do deinterlacing. Default is Deinterlace.\n- Deinterlace converts interlaced to progressive.\n- Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.\n- Adaptive auto-detects and converts to progressive." } }, "documentation": "Settings for deinterlacer" @@ -4211,7 +4211,7 @@ }, "DeinterlacerMode": { "type": "string", - "documentation": "Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. Default is Deinterlace. - Deinterlace converts interlaced to progressive. - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. - Adaptive auto-detects and converts to progressive.", + "documentation": "Use Deinterlacer to choose how the service will do deinterlacing. 
Default is Deinterlace.\n- Deinterlace converts interlaced to progressive.\n- Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.\n- Adaptive auto-detects and converts to progressive.", "enum": [ "DEINTERLACE", "INVERSE_TELECINE", @@ -4428,7 +4428,7 @@ }, "DropFrameTimecode": { "type": "string", - "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion (TimecodeInsertion) is enabled.", + "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled.", "enum": [ "DISABLED", "ENABLED" @@ -4453,7 +4453,7 @@ "documentation": "The number of milliseconds between instances of this table in the output transport stream." } }, - "documentation": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output." }, "DvbSdtSettings": { "type": "structure", @@ -4479,7 +4479,7 @@ "documentation": "The service provider name placed in the service_descriptor in the Service Description Table. Maximum length is 256 characters." } }, - "documentation": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output. 
When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output." }, "DvbSubDestinationSettings": { "type": "structure", @@ -4492,17 +4492,17 @@ "ApplyFontColor": { "shape": "DvbSubtitleApplyFontColor", "locationName": "applyFontColor", - "documentation": "Ignore this setting unless Style Passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." + "documentation": "Ignore this setting unless Style Passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." }, "BackgroundColor": { "shape": "DvbSubtitleBackgroundColor", "locationName": "backgroundColor", - "documentation": "Specify the color of the rectangle behind the captions. 
Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present." + "documentation": "Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present." }, "BackgroundOpacity": { "shape": "__integerMin0Max255", "locationName": "backgroundOpacity", - "documentation": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." }, "DdsHandling": { "shape": "DvbddsHandling", @@ -4512,22 +4512,22 @@ "DdsXCoordinate": { "shape": "__integerMin0Max2147483647", "locationName": "ddsXCoordinate", - "documentation": "Use this setting, along with DDS y-coordinate (ddsYCoordinate), to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the left side of the frame and the left side of the DDS display window. 
Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match." + "documentation": "Use this setting, along with DDS y-coordinate, to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the left side of the frame and the left side of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling to a value other than None. MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment. All burn-in and DVB-Sub font settings must match." }, "DdsYCoordinate": { "shape": "__integerMin0Max2147483647", "locationName": "ddsYCoordinate", - "documentation": "Use this setting, along with DDS x-coordinate (ddsXCoordinate), to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the top of the frame and the top of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match." + "documentation": "Use this setting, along with DDS x-coordinate, to specify the upper left corner of the display definition segment (DDS) display window. 
With this setting, specify the distance, in pixels, between the top of the frame and the top of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling to a value other than None. MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match." }, "FallbackFont": { "shape": "DvbSubSubtitleFallbackFont", "locationName": "fallbackFont", - "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." }, "FontColor": { "shape": "DvbSubtitleFontColor", "locationName": "fontColor", - "documentation": "Specify the color of the captions text. 
Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the color of the captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." }, "FontOpacity": { "shape": "__integerMin0Max255", @@ -4537,22 +4537,22 @@ "FontResolution": { "shape": "__integerMin96Max600", "locationName": "fontResolution", - "documentation": "Specify the Font resolution (FontResolution) in DPI (dots per inch).\nWithin your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the Font resolution in DPI (dots per inch).\nWithin your job settings, all of your DVB-Sub settings must be identical." }, "FontScript": { "shape": "FontScript", "locationName": "fontScript", - "documentation": "Set Font script (FontScript) to Automatically determined (AUTOMATIC), or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Set Font script to Automatically determined, or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese. Within your job settings, all of your DVB-Sub settings must be identical." }, "FontSize": { "shape": "__integerMin0Max96", "locationName": "fontSize", - "documentation": "Specify the Font size (FontSize) in pixels. Must be a positive integer. 
Set to 0, or leave blank, for automatic font size. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size. Within your job settings, all of your DVB-Sub settings must be identical." }, "Height": { "shape": "__integerMin1Max2147483647", "locationName": "height", - "documentation": "Specify the height, in pixels, of this set of DVB-Sub captions. The default value is 576 pixels. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and DVB-Sub font settings must match." + "documentation": "Specify the height, in pixels, of this set of DVB-Sub captions. The default value is 576 pixels. Related setting: When you use this setting, you must set DDS handling to a value other than None. All burn-in and DVB-Sub font settings must match." }, "HexFontColor": { "shape": "__stringMin6Max8Pattern09aFAF609aFAF2", @@ -4562,22 +4562,22 @@ "OutlineColor": { "shape": "DvbSubtitleOutlineColor", "locationName": "outlineColor", - "documentation": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." }, "OutlineSize": { "shape": "__integerMin0Max10", "locationName": "outlineSize", - "documentation": "Specify the Outline size (OutlineSize) of the caption text, in pixels. 
Leave Outline size blank and set Style passthrough (StylePassthrough) to enabled to use the outline size data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." }, "ShadowColor": { "shape": "DvbSubtitleShadowColor", "locationName": "shadowColor", - "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." }, "ShadowOpacity": { "shape": "__integerMin0Max255", "locationName": "shadowOpacity", - "documentation": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. 
If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." }, "ShadowXOffset": { "shape": "__integerMinNegative2147483648Max2147483647", @@ -4587,12 +4587,12 @@ "ShadowYOffset": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "shadowYOffset", - "documentation": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset (ShadowYOffset) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow y-offset data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset blank and set Style passthrough to enabled to use the shadow y-offset data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." }, "StylePassthrough": { "shape": "DvbSubtitleStylePassthrough", "locationName": "stylePassthrough", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. 
Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." }, "SubtitlingType": { "shape": "DvbSubtitlingType", @@ -4602,25 +4602,25 @@ "TeletextSpacing": { "shape": "DvbSubtitleTeletextSpacing", "locationName": "teletextSpacing", - "documentation": "Specify whether the Text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify whether the Text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical." }, "Width": { "shape": "__integerMin1Max2147483647", "locationName": "width", - "documentation": "Specify the width, in pixels, of this set of DVB-Sub captions. The default value is 720 pixels. 
Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and DVB-Sub font settings must match." + "documentation": "Specify the width, in pixels, of this set of DVB-Sub captions. The default value is 720 pixels. Related setting: When you use this setting, you must set DDS handling to a value other than None. All burn-in and DVB-Sub font settings must match." }, "XPosition": { "shape": "__integerMin0Max2147483647", "locationName": "xPosition", - "documentation": "Specify the horizontal position (XPosition) of the captions, relative to the left side of the outputin pixels. A value of 10 would result in the captions starting 10 pixels from the left ofthe output. If no explicit x_position is provided, the horizontal caption position will bedetermined by the alignment parameter. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the horizontal position of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter. Within your job settings, all of your DVB-Sub settings must be identical." }, "YPosition": { "shape": "__integerMin0Max2147483647", "locationName": "yPosition", - "documentation": "Specify the vertical position (YPosition) of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the vertical position of the captions, relative to the top of the output in pixels. 
A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. Within your job settings, all of your DVB-Sub settings must be identical." } }, - "documentation": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to DVB_SUB." + "documentation": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html." }, "DvbSubSourceSettings": { "type": "structure", @@ -4635,7 +4635,7 @@ }, "DvbSubSubtitleFallbackFont": { "type": "string", - "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. 
When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", "enum": [ "BEST_MATCH", "MONOSPACED_SANSSERIF", @@ -4655,7 +4655,7 @@ }, "DvbSubtitleApplyFontColor": { "type": "string", - "documentation": "Ignore this setting unless Style Passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", + "documentation": "Ignore this setting unless Style Passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", "enum": [ "WHITE_TEXT_ONLY", "ALL_TEXT" @@ -4663,7 +4663,7 @@ }, "DvbSubtitleBackgroundColor": { "type": "string", - "documentation": "Specify the color of the rectangle behind the captions. 
Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present.", + "documentation": "Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present.", "enum": [ "NONE", "BLACK", @@ -4673,7 +4673,7 @@ }, "DvbSubtitleFontColor": { "type": "string", - "documentation": "Specify the color of the captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "documentation": "Specify the color of the captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", "enum": [ "WHITE", "BLACK", @@ -4687,7 +4687,7 @@ }, "DvbSubtitleOutlineColor": { "type": "string", - "documentation": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "documentation": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", "enum": [ "BLACK", "WHITE", @@ -4700,7 +4700,7 @@ }, "DvbSubtitleShadowColor": { "type": "string", - "documentation": "Specify the color of the shadow cast by the captions. 
Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", "enum": [ "NONE", "BLACK", @@ -4710,7 +4710,7 @@ }, "DvbSubtitleStylePassthrough": { "type": "string", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. 
Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", "enum": [ "ENABLED", "DISABLED" @@ -4718,7 +4718,7 @@ }, "DvbSubtitleTeletextSpacing": { "type": "string", - "documentation": "Specify whether the Text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical.", + "documentation": "Specify whether the Text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical.", "enum": [ "FIXED_GRID", "PROPORTIONAL", @@ -4742,7 +4742,7 @@ "documentation": "The number of milliseconds between instances of this table in the output transport stream." } }, - "documentation": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output." }, "DvbddsHandling": { "type": "string", @@ -4780,7 +4780,7 @@ }, "Eac3AtmosDownmixControl": { "type": "string", - "documentation": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom (SPECIFIED) to provide downmix values in your job settings. 
Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround (LoRoSurroundMixLevel), Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), and Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for Downmix control (DownmixControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings.", + "documentation": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom to provide downmix values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround, Left total/Right total surround, Left total/Right total center, Left only/Right only center, and Stereo downmix. When you keep Custom for Downmix control and you don't specify values for the related settings, MediaConvert uses default values for those settings.", "enum": [ "SPECIFIED", "INITIALIZE_FROM_SOURCE" @@ -4788,7 +4788,7 @@ }, "Eac3AtmosDynamicRangeCompressionLine": { "type": "string", - "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine). 
For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression line. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "NONE", "FILM_STANDARD", @@ -4800,7 +4800,7 @@ }, "Eac3AtmosDynamicRangeCompressionRf": { "type": "string", - "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. 
Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression RF. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "NONE", "FILM_STANDARD", @@ -4812,7 +4812,7 @@ }, "Eac3AtmosDynamicRangeControl": { "type": "string", - "documentation": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom (SPECIFIED), to provide dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf). When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings.", + "documentation": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom, to provide dynamic range control values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line and Dynamic range compression RF. 
When you keep the value Custom for Dynamic range control and you don't specify values for the related settings, MediaConvert uses default values for those settings.", "enum": [ "SPECIFIED", "INITIALIZE_FROM_SOURCE" @@ -4855,42 +4855,42 @@ "DownmixControl": { "shape": "Eac3AtmosDownmixControl", "locationName": "downmixControl", - "documentation": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom (SPECIFIED) to provide downmix values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround (LoRoSurroundMixLevel), Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), and Stereo downmix (StereoDownmix). When you keep Custom (SPECIFIED) for Downmix control (DownmixControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings." + "documentation": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom to provide downmix values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround, Left total/Right total surround, Left total/Right total center, Left only/Right only center, and Stereo downmix. When you keep Custom for Downmix control and you don't specify values for the related settings, MediaConvert uses default values for those settings." 
}, "DynamicRangeCompressionLine": { "shape": "Eac3AtmosDynamicRangeCompressionLine", "locationName": "dynamicRangeCompressionLine", - "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression line. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "DynamicRangeCompressionRf": { "shape": "Eac3AtmosDynamicRangeCompressionRf", "locationName": "dynamicRangeCompressionRf", - "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. 
Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression RF. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "DynamicRangeControl": { "shape": "Eac3AtmosDynamicRangeControl", "locationName": "dynamicRangeControl", - "documentation": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom (SPECIFIED), to provide dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf). When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings." 
+ "documentation": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom, to provide dynamic range control values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line and Dynamic range compression RF. When you keep the value Custom for Dynamic range control and you don't specify values for the related settings, MediaConvert uses default values for those settings." }, "LoRoCenterMixLevel": { "shape": "__doubleMinNegative6Max3", "locationName": "loRoCenterMixLevel", - "documentation": "Specify a value for the following Dolby Atmos setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Left only/Right only center (LoRoCenterMixLevel)." + "documentation": "Specify a value for the following Dolby Atmos setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Left only/Right only center." 
}, "LoRoSurroundMixLevel": { "shape": "__doubleMinNegative60MaxNegative1", "locationName": "loRoSurroundMixLevel", - "documentation": "Specify a value for the following Dolby Atmos setting: Left only/Right only (Lo/Ro surround). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Left only/Right only surround (LoRoSurroundMixLevel)." + "documentation": "Specify a value for the following Dolby Atmos setting: Left only/Right only. MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Left only/Right only surround." }, "LtRtCenterMixLevel": { "shape": "__doubleMinNegative6Max3", "locationName": "ltRtCenterMixLevel", - "documentation": "Specify a value for the following Dolby Atmos setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). 
Otherwise, MediaConvert ignores Left total/Right total center (LtRtCenterMixLevel)." + "documentation": "Specify a value for the following Dolby Atmos setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default value: -3 dB Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Left total/Right total center." }, "LtRtSurroundMixLevel": { "shape": "__doubleMinNegative60MaxNegative1", "locationName": "ltRtSurroundMixLevel", - "documentation": "Specify a value for the following Dolby Atmos setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, the service ignores Left total/Right total surround (LtRtSurroundMixLevel)." + "documentation": "Specify a value for the following Dolby Atmos setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. Default value: -3 dB Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, the service ignores Left total/Right total surround." 
}, "MeteringMode": { "shape": "Eac3AtmosMeteringMode", @@ -4910,7 +4910,7 @@ "StereoDownmix": { "shape": "Eac3AtmosStereoDownmix", "locationName": "stereoDownmix", - "documentation": "Choose how the service does stereo downmixing. Default value: Not indicated (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo downmix (StereoDownmix)." + "documentation": "Choose how the service does stereo downmixing. Default value: Not indicated Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Stereo downmix." }, "SurroundExMode": { "shape": "Eac3AtmosSurroundExMode", @@ -4918,11 +4918,11 @@ "documentation": "Specify whether your input audio has an additional center rear surround channel matrix encoded into your left and right surround channels." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3_ATMOS." + "documentation": "Required when you set Codec to the value EAC3_ATMOS." }, "Eac3AtmosStereoDownmix": { "type": "string", - "documentation": "Choose how the service does stereo downmixing. Default value: Not indicated (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo downmix (StereoDownmix).", + "documentation": "Choose how the service does stereo downmixing. Default value: Not indicated Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. 
Otherwise, MediaConvert ignores Stereo downmix.", "enum": [ "NOT_INDICATED", "STEREO", @@ -4977,7 +4977,7 @@ }, "Eac3DynamicRangeCompressionLine": { "type": "string", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "NONE", "FILM_STANDARD", @@ -4989,7 +4989,7 @@ }, "Eac3DynamicRangeCompressionRf": { "type": "string", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). 
For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "NONE", "FILM_STANDARD", @@ -5075,12 +5075,12 @@ "DynamicRangeCompressionLine": { "shape": "Eac3DynamicRangeCompressionLine", "locationName": "dynamicRangeCompressionLine", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. 
For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "DynamicRangeCompressionRf": { "shape": "Eac3DynamicRangeCompressionRf", "locationName": "dynamicRangeCompressionRf", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "LfeControl": { "shape": "Eac3LfeControl", @@ -5095,22 +5095,22 @@ "LoRoCenterMixLevel": { "shape": "__doubleMinNegative60Max3", "locationName": "loRoCenterMixLevel", - "documentation": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. 
The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left only/Right only center (loRoCenterMixLevel)." + "documentation": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only center mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left only/Right only center." }, "LoRoSurroundMixLevel": { "shape": "__doubleMinNegative60MaxNegative1", "locationName": "loRoSurroundMixLevel", - "documentation": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left only/Right only surround (loRoSurroundMixLevel)." + "documentation": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. 
This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left only/Right only surround." }, "LtRtCenterMixLevel": { "shape": "__doubleMinNegative60Max3", "locationName": "ltRtCenterMixLevel", - "documentation": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left total/Right total center (ltRtCenterMixLevel)." + "documentation": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total center mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left total/Right total center." }, "LtRtSurroundMixLevel": { "shape": "__doubleMinNegative60MaxNegative1", "locationName": "ltRtSurroundMixLevel", - "documentation": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. 
The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left total/Right total surround (ltRtSurroundMixLevel)." + "documentation": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total surround mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left total/Right total surround." }, "MetadataControl": { "shape": "Eac3MetadataControl", @@ -5135,7 +5135,7 @@ "StereoDownmix": { "shape": "Eac3StereoDownmix", "locationName": "stereoDownmix", - "documentation": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix)." + "documentation": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Stereo downmix." }, "SurroundExMode": { "shape": "Eac3SurroundExMode", @@ -5148,11 +5148,11 @@ "documentation": "When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into the two channels." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3." + "documentation": "Required when you set Codec to the value EAC3." 
}, "Eac3StereoDownmix": { "type": "string", - "documentation": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix).", + "documentation": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Stereo downmix.", "enum": [ "NOT_INDICATED", "LO_RO", @@ -5180,7 +5180,7 @@ }, "EmbeddedConvert608To708": { "type": "string", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "enum": [ "UPCONVERT", "DISABLED" @@ -5197,10 +5197,10 @@ "Destination708ServiceNumber": { "shape": "__integerMin1Max6", "locationName": "destination708ServiceNumber", - "documentation": "Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector settings. 
If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber) for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded." + "documentation": "Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert to Upconvert in your input captions selector settings. If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number that you specify for CC channel number for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded." } }, - "documentation": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED." + "documentation": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html." }, "EmbeddedSourceSettings": { "type": "structure", @@ -5208,7 +5208,7 @@ "Convert608To708": { "shape": "EmbeddedConvert608To708", "locationName": "convert608To708", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. 
If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." }, "Source608ChannelNumber": { "shape": "__integerMin1Max4", @@ -5238,7 +5238,7 @@ }, "EmbeddedTimecodeOverride": { "type": "string", - "documentation": "Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded timecode override blank, or set to None (NONE), when your input does not contain MDPM timecode.", + "documentation": "Set Embedded timecode override to Use MDPM when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata. When you do, we recommend you also set Timecode source to Embedded. Leave Embedded timecode override blank, or set to None, when your input does not contain MDPM timecode.", "enum": [ "NONE", "USE_MDPM" @@ -5272,7 +5272,7 @@ "ManifestConfirmConditionNotification": { "shape": "EsamManifestConfirmConditionNotification", "locationName": "manifestConfirmConditionNotification", - "documentation": "Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the manifest conditioning instructions that you provide in the setting MCC XML (mccXml)." + "documentation": "Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025. 
The transcoder uses the manifest conditioning instructions that you provide in the setting MCC XML." }, "ResponseSignalPreroll": { "shape": "__integerMin0Max30000", @@ -5282,7 +5282,7 @@ "SignalProcessingNotification": { "shape": "EsamSignalProcessingNotification", "locationName": "signalProcessingNotification", - "documentation": "Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing instructions that you provide in the setting SCC XML (sccXml)." + "documentation": "Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing instructions that you provide in the setting SCC XML." } }, "documentation": "Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion, you can ignore these settings." @@ -5293,7 +5293,7 @@ "SccXml": { "shape": "__stringPatternSNSignalProcessingNotificationNS", "locationName": "sccXml", - "documentation": "Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. For your MPEG2-TS file outputs, if you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both." + "documentation": "Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. 
For your MPEG2-TS file outputs, if you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM. Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both." } }, "documentation": "ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025." @@ -5348,7 +5348,7 @@ "Destination": { "shape": "__stringPatternS3", "locationName": "destination", - "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + "documentation": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." }, "DestinationSettings": { "shape": "DestinationSettings", @@ -5356,11 +5356,11 @@ "documentation": "Settings associated with the destination. Will vary based on the type of destination" } }, - "documentation": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to FILE_GROUP_SETTINGS." + "documentation": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package." 
}, "FileSourceConvert608To708": { "type": "string", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "enum": [ "UPCONVERT", "DISABLED" @@ -5372,7 +5372,7 @@ "Convert608To708": { "shape": "FileSourceConvert608To708", "locationName": "convert608To708", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." }, "ConvertPaintToPop": { "shape": "CaptionSourceConvertPaintOnToPopOn", @@ -5382,7 +5382,7 @@ "Framerate": { "shape": "CaptionSourceFramerate", "locationName": "framerate", - "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. 
When you work directly in your JSON job specification, use the settings framerateNumerator and framerateDenominator. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." + "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." }, "SourceFile": { "shape": "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMIVttVTTWebvttWEBVTTHttpsSccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMIVttVTTWebvttWEBVTT", @@ -5392,19 +5392,19 @@ "TimeDelta": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "timeDelta", - "documentation": "Optional. Use this setting when you need to adjust the sync between your sidecar captions and your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/time-delta-use-cases.html. Enter a positive or negative number to modify the times in the captions file. For example, type 15 to add 15 seconds to all the times in the captions file. Type -5 to subtract 5 seconds from the times in the captions file. You can optionally specify your time delta in milliseconds instead of seconds. When you do so, set the related setting, Time delta units (TimeDeltaUnits) to Milliseconds (MILLISECONDS). Note that, when you specify a time delta for timecode-based caption sources, such as SCC and STL, and your time delta isn't a multiple of the input frame rate, MediaConvert snaps the captions to the nearest frame. For example, when your input video frame rate is 25 fps and you specify 1010ms for time delta, MediaConvert delays your captions by 1000 ms." + "documentation": "Optional. 
Use this setting when you need to adjust the sync between your sidecar captions and your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/time-delta-use-cases.html. Enter a positive or negative number to modify the times in the captions file. For example, type 15 to add 15 seconds to all the times in the captions file. Type -5 to subtract 5 seconds from the times in the captions file. You can optionally specify your time delta in milliseconds instead of seconds. When you do so, set the related setting, Time delta units to Milliseconds. Note that, when you specify a time delta for timecode-based caption sources, such as SCC and STL, and your time delta isn't a multiple of the input frame rate, MediaConvert snaps the captions to the nearest frame. For example, when your input video frame rate is 25 fps and you specify 1010ms for time delta, MediaConvert delays your captions by 1000 ms." }, "TimeDeltaUnits": { "shape": "FileSourceTimeDeltaUnits", "locationName": "timeDeltaUnits", - "documentation": "When you use the setting Time delta (TimeDelta) to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units (TimeDeltaUnits), MediaConvert uses seconds by default." + "documentation": "When you use the setting Time delta to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units, MediaConvert uses seconds by default." } }, "documentation": "If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1 in an xml file, specify the URI of the input caption source file. If your caption source is IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings." 
}, "FileSourceTimeDeltaUnits": { "type": "string", - "documentation": "When you use the setting Time delta (TimeDelta) to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units (TimeDeltaUnits), MediaConvert uses seconds by default.", + "documentation": "When you use the setting Time delta to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units, MediaConvert uses seconds by default.", "enum": [ "SECONDS", "MILLISECONDS" @@ -5473,7 +5473,7 @@ "documentation": "JPEG Quality - a higher value equals higher quality." } }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value FRAME_CAPTURE." + "documentation": "Required when you set Codec to the value FRAME_CAPTURE." }, "GetJobRequest": { "type": "structure", @@ -5588,7 +5588,7 @@ }, "H264AdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.", + "documentation": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. 
When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.", "enum": [ "OFF", "AUTO", @@ -5601,7 +5601,7 @@ }, "H264CodecLevel": { "type": "string", - "documentation": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto (AUTO).", + "documentation": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto.", "enum": [ "AUTO", "LEVEL_1", @@ -5636,7 +5636,7 @@ }, "H264DynamicSubGop": { "type": "string", - "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. 
The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", "enum": [ "ADAPTIVE", "STATIC" @@ -5652,7 +5652,7 @@ }, "H264FieldEncoding": { "type": "string", - "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs.", + "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs.", "enum": [ "PAFF", "FORCE_FIELD", @@ -5661,7 +5661,7 @@ }, "H264FlickerAdaptiveQuantization": { "type": "string", - "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. 
To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled. Change this value to Enabled to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO.", "enum": [ "DISABLED", "ENABLED" @@ -5669,7 +5669,7 @@ }, "H264FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. 
Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -5694,7 +5694,7 @@ }, "H264GopSizeUnits": { "type": "string", - "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize).", + "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. 
To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size.", "enum": [ "FRAMES", "SECONDS", @@ -5703,7 +5703,7 @@ }, "H264InterlaceMode": { "type": "string", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. 
If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -5714,7 +5714,7 @@ }, "H264ParControl": { "type": "string", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -5740,7 +5740,7 @@ "QvbrQualityLevel": { "shape": "__integerMin1Max10", "locationName": "qvbrQualityLevel", - "documentation": "Use this setting only when you set Rate control mode (RateControlMode) to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. 
Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." + "documentation": "Use this setting only when you set Rate control mode to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." }, "QvbrQualityLevelFineTune": { "shape": "__doubleMin0Max1", @@ -5748,7 +5748,7 @@ "documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33." } }, - "documentation": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the H.264 codec. 
Use these settings only when you set QVBR for Rate control mode." }, "H264RateControlMode": { "type": "string", @@ -5769,7 +5769,7 @@ }, "H264ScanTypeConversionMode": { "type": "string", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. 
You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "enum": [ "INTERLACED", "INTERLACED_OPTIMIZE" @@ -5777,7 +5777,7 @@ }, "H264SceneChangeDetect": { "type": "string", - "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", + "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", "enum": [ "DISABLED", "ENABLED", @@ -5790,7 +5790,7 @@ "AdaptiveQuantization": { "shape": "H264AdaptiveQuantization", "locationName": "adaptiveQuantization", - "documentation": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization." 
+ "documentation": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization." }, "BandwidthReductionFilter": { "shape": "BandwidthReductionFilter", @@ -5805,7 +5805,7 @@ "CodecLevel": { "shape": "H264CodecLevel", "locationName": "codecLevel", - "documentation": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto (AUTO)." + "documentation": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto." }, "CodecProfile": { "shape": "H264CodecProfile", @@ -5825,17 +5825,17 @@ "FieldEncoding": { "shape": "H264FieldEncoding", "locationName": "fieldEncoding", - "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs." + "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field to disable PAFF encoding and create separate interlaced fields. 
Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs." }, "FlickerAdaptiveQuantization": { "shape": "H264FlickerAdaptiveQuantization", "locationName": "flickerAdaptiveQuantization", - "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled. Change this value to Enabled to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. 
When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO." }, "FramerateControl": { "shape": "H264FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "H264FramerateConversionAlgorithm", @@ -5860,17 +5860,17 @@ "GopClosedCadence": { "shape": "__integerMin0Max2147483647", "locationName": "gopClosedCadence", - "documentation": "Specify the relative frequency of open to closed GOPs in this output. 
For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0." + "documentation": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. In the console, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0." }, "GopSize": { "shape": "__doubleMin0", "locationName": "gopSize", - "documentation": "Use this setting only when you set GOP mode control (GopSizeUnits) to Specified, frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control (GopSizeUnits). If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto (AUTO). If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group." + "documentation": "Use this setting only when you set GOP mode control to Specified, frames or Specified, seconds. Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control. 
If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto. If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group." }, "GopSizeUnits": { "shape": "H264GopSizeUnits", "locationName": "gopSizeUnits", - "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize)." + "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size." }, "HrdBufferFinalFillPercentage": { "shape": "__integerMin0Max100", @@ -5890,7 +5890,7 @@ "InterlaceMode": { "shape": "H264InterlaceMode", "locationName": "interlaceMode", - "documentation": "Choose the scan line type for the output. 
Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." }, "MaxBitrate": { "shape": "__integerMin1000Max1152000000", @@ -5900,7 +5900,7 @@ "MinIInterval": { "shape": "__integerMin0Max30", "locationName": "minIInterval", - "documentation": "Use this setting only when you also enable Scene change detection (SceneChangeDetect). 
This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval (minIInterval) to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs."
+        "documentation": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. 
When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." }, "NumberBFramesBetweenReferenceFrames": { "shape": "__integerMin0Max7", @@ -5915,17 +5915,17 @@ "ParControl": { "shape": "H264ParControl", "locationName": "parControl", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. 
When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "H264QualityTuningLevel", @@ -5935,7 +5935,7 @@ "QvbrSettings": { "shape": "H264QvbrSettings", "locationName": "qvbrSettings", - "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." }, "RateControlMode": { "shape": "H264RateControlMode", @@ -5950,12 +5950,12 @@ "ScanTypeConversionMode": { "shape": "H264ScanTypeConversionMode", "locationName": "scanTypeConversionMode", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. 
In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." 
}, "SceneChangeDetect": { "shape": "H264SceneChangeDetect", "locationName": "sceneChangeDetect", - "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." + "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." }, "Slices": { "shape": "__integerMin1Max32", @@ -5965,17 +5965,17 @@ "SlowPal": { "shape": "H264SlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." 
},
       "Softness": {
         "shape": "__integerMin0Max128",
         "locationName": "softness",
-        "documentation": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). The Softness (softness) setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matricies from the H.264 specification. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video."
+        "documentation": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness or by enabling a noise reducer filter. The Softness setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matrices from the H.264 specification. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video."
       },
       "SpatialAdaptiveQuantization": {
         "shape": "H264SpatialAdaptiveQuantization",
         "locationName": "spatialAdaptiveQuantization",
-        "documentation": "Only use this setting when you change the default value, Auto (AUTO), for the setting H264AdaptiveQuantization. 
When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (H264AdaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." + "documentation": "Only use this setting when you change the default value, Auto, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. 
When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO." }, "Syntax": { "shape": "H264Syntax", @@ -5985,12 +5985,12 @@ "Telecine": { "shape": "H264Telecine", "locationName": "telecine", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. 
When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." }, "TemporalAdaptiveQuantization": { "shape": "H264TemporalAdaptiveQuantization", "locationName": "temporalAdaptiveQuantization", - "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. 
If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization). To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization. 
To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO." }, "UnregisteredSeiTimecode": { "shape": "H264UnregisteredSeiTimecode", @@ -5998,11 +5998,11 @@ "documentation": "Inserts timecode for each frame as 4 bytes of an unregistered SEI message." } }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value H_264." + "documentation": "Required when you set Codec to the value H_264." }, "H264SlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -6010,7 +6010,7 @@ }, "H264SpatialAdaptiveQuantization": { "type": "string", - "documentation": "Only use this setting when you change the default value, Auto (AUTO), for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. 
When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (H264AdaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", + "documentation": "Only use this setting when you change the default value, Auto, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled. 
Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO.", "enum": [ "DISABLED", "ENABLED" @@ -6026,7 +6026,7 @@ }, "H264Telecine": { "type": "string", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. 
When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "enum": [ "NONE", "SOFT", @@ -6035,7 +6035,7 @@ }, "H264TemporalAdaptiveQuantization": { "type": "string", - "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. 
If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization). To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization. 
To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO.", "enum": [ "DISABLED", "ENABLED" @@ -6051,7 +6051,7 @@ }, "H265AdaptiveQuantization": { "type": "string", - "documentation": "When you set Adaptive Quantization (H265AdaptiveQuantization) to Auto (AUTO), or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low (LOW), Medium (MEDIUM), High (HIGH), Higher (HIGHER), or Max (MAX) to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization (H265SpatialAdaptiveQuantization), Temporal Adaptive Quantization (H265TemporalAdaptiveQuantization), and Flicker Adaptive Quantization (H265FlickerAdaptiveQuantization), to further control the quantization filter. Set Adaptive Quantization to Off (OFF) to apply no quantization to your output.", + "documentation": "When you set Adaptive Quantization to Auto, or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low, Medium, High, Higher, or Max to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization, Temporal Adaptive Quantization, and Flicker Adaptive Quantization, to further control the quantization filter. Set Adaptive Quantization to Off to apply no quantization to your output.", "enum": [ "OFF", "LOW", @@ -6106,7 +6106,7 @@ }, "H265DynamicSubGop": { "type": "string", - "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. 
The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", "enum": [ "ADAPTIVE", "STATIC" @@ -6114,7 +6114,7 @@ }, "H265FlickerAdaptiveQuantization": { "type": "string", - "documentation": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off (OFF).", + "documentation": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off.", "enum": [ "DISABLED", "ENABLED" @@ -6122,7 +6122,7 @@ }, "H265FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. 
If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -6147,7 +6147,7 @@ }, "H265GopSizeUnits": { "type": "string", - "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. 
To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize).", + "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size.", "enum": [ "FRAMES", "SECONDS", @@ -6156,7 +6156,7 @@ }, "H265InterlaceMode": { "type": "string", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "documentation": "Choose the scan line type for the output. 
Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -6167,7 +6167,7 @@ }, "H265ParControl": { "type": "string", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -6175,7 +6175,7 @@ }, "H265QualityTuningLevel": { "type": "string", - "documentation": "Optional. 
Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "SINGLE_PASS_HQ", @@ -6193,7 +6193,7 @@ "QvbrQualityLevel": { "shape": "__integerMin1Max10", "locationName": "qvbrQualityLevel", - "documentation": "Use this setting only when you set Rate control mode (RateControlMode) to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." + "documentation": "Use this setting only when you set Rate control mode to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. 
Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." }, "QvbrQualityLevelFineTune": { "shape": "__doubleMin0Max1", @@ -6201,7 +6201,7 @@ "documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33." } }, - "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." }, "H265RateControlMode": { "type": "string", @@ -6223,7 +6223,7 @@ }, "H265ScanTypeConversionMode": { "type": "string", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. 
When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "enum": [ "INTERLACED", "INTERLACED_OPTIMIZE" @@ -6231,7 +6231,7 @@ }, "H265SceneChangeDetect": { "type": "string", - "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. 
For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", + "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", "enum": [ "DISABLED", "ENABLED", @@ -6244,7 +6244,7 @@ "AdaptiveQuantization": { "shape": "H265AdaptiveQuantization", "locationName": "adaptiveQuantization", - "documentation": "When you set Adaptive Quantization (H265AdaptiveQuantization) to Auto (AUTO), or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low (LOW), Medium (MEDIUM), High (HIGH), Higher (HIGHER), or Max (MAX) to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization (H265SpatialAdaptiveQuantization), Temporal Adaptive Quantization (H265TemporalAdaptiveQuantization), and Flicker Adaptive Quantization (H265FlickerAdaptiveQuantization), to further control the quantization filter. Set Adaptive Quantization to Off (OFF) to apply no quantization to your output." + "documentation": "When you set Adaptive Quantization to Auto, or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low, Medium, High, Higher, or Max to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization, Temporal Adaptive Quantization, and Flicker Adaptive Quantization, to further control the quantization filter. Set Adaptive Quantization to Off to apply no quantization to your output." 
}, "AlternateTransferFunctionSei": { "shape": "H265AlternateTransferFunctionSei", @@ -6279,12 +6279,12 @@ "FlickerAdaptiveQuantization": { "shape": "H265FlickerAdaptiveQuantization", "locationName": "flickerAdaptiveQuantization", - "documentation": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off (OFF)." + "documentation": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off." }, "FramerateControl": { "shape": "H265FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. 
If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "H265FramerateConversionAlgorithm", @@ -6309,17 +6309,17 @@ "GopClosedCadence": { "shape": "__integerMin0Max2147483647", "locationName": "gopClosedCadence", - "documentation": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0." + "documentation": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. 
To enable this automatic behavior, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0." }, "GopSize": { "shape": "__doubleMin0", "locationName": "gopSize", - "documentation": "Use this setting only when you set GOP mode control (GopSizeUnits) to Specified, frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control (GopSizeUnits). If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto (AUTO). If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group." + "documentation": "Use this setting only when you set GOP mode control to Specified, frames or Specified, seconds. Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control. If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto. If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group." }, "GopSizeUnits": { "shape": "H265GopSizeUnits", "locationName": "gopSizeUnits", - "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. 
If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize)." + "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size." }, "HrdBufferFinalFillPercentage": { "shape": "__integerMin0Max100", @@ -6339,7 +6339,7 @@ "InterlaceMode": { "shape": "H265InterlaceMode", "locationName": "interlaceMode", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. 
If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." }, "MaxBitrate": { "shape": "__integerMin1000Max1466400000", @@ -6349,7 +6349,7 @@ "MinIInterval": { "shape": "__integerMin0Max30", "locationName": "minIInterval", - "documentation": "Use this setting only when you also enable Scene change detection (SceneChangeDetect). This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. 
For example, if you set Min I interval (minIInterval) to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." + "documentation": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." }, "NumberBFramesBetweenReferenceFrames": { "shape": "__integerMin0Max7", @@ -6364,27 +6364,27 @@ "ParControl": { "shape": "H265ParControl", "locationName": "parControl", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. 
To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. 
When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "H265QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." }, "QvbrSettings": { "shape": "H265QvbrSettings", "locationName": "qvbrSettings", - "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." }, "RateControlMode": { "shape": "H265RateControlMode", @@ -6399,12 +6399,12 @@ "ScanTypeConversionMode": { "shape": "H265ScanTypeConversionMode", "locationName": "scanTypeConversionMode", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. 
In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." 
}, "SceneChangeDetect": { "shape": "H265SceneChangeDetect", "locationName": "sceneChangeDetect", - "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." + "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." }, "Slices": { "shape": "__integerMin1Max32", @@ -6414,22 +6414,22 @@ "SlowPal": { "shape": "H265SlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." 
}, "SpatialAdaptiveQuantization": { "shape": "H265SpatialAdaptiveQuantization", "locationName": "spatialAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. 
If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." }, "Telecine": { "shape": "H265Telecine", "locationName": "telecine", - "documentation": "This field applies only if the Streams > Advanced > Framerate (framerate) field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced Mode field (interlace_mode) to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i." + "documentation": "This field applies only if the Streams > Advanced > Framerate field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field and the Streams > Advanced > Interlaced Mode field to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i." }, "TemporalAdaptiveQuantization": { "shape": "H265TemporalAdaptiveQuantization", "locationName": "temporalAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. 
For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization)." + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization." }, "TemporalIds": { "shape": "H265TemporalIds", @@ -6456,7 +6456,7 @@ }, "H265SlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. 
Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -6464,7 +6464,7 @@ }, "H265SpatialAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. 
For content with a wider variety of textures, set it to High or Higher.", + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", "enum": [ "DISABLED", "ENABLED" @@ -6472,7 +6472,7 @@ }, "H265Telecine": { "type": "string", - "documentation": "This field applies only if the Streams > Advanced > Framerate (framerate) field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced Mode field (interlace_mode) to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i.", + "documentation": "This field applies only if the Streams > Advanced > Framerate field is set to 29.970. 
This field works with the Streams > Advanced > Preprocessors > Deinterlacer field and the Streams > Advanced > Interlaced Mode field to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i.", "enum": [ "NONE", "SOFT", @@ -6481,7 +6481,7 @@ }, "H265TemporalAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization).", + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. 
Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization.", "enum": [ "DISABLED", "ENABLED" @@ -6635,7 +6635,7 @@ }, "HlsAudioOnlyContainer": { "type": "string", - "documentation": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic (AUTOMATIC) to create a raw audio-only file with no container. Regardless of the value that you specify here, if this output has video, the service will place outputs into an MPEG2-TS container.", + "documentation": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic to create a raw audio-only file with no container. Regardless of the value that you specify here, if this output has video, the service will place outputs into an MPEG2-TS container.", "enum": [ "AUTOMATIC", "M2TS" @@ -6643,7 +6643,7 @@ }, "HlsAudioOnlyHeader": { "type": "string", - "documentation": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include (INCLUDE), to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only headers from your audio segments.", + "documentation": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include, to output audio-only headers. 
Choose Exclude to remove the audio-only headers from your audio segments.", "enum": [ "INCLUDE", "EXCLUDE" @@ -6696,7 +6696,7 @@ }, "HlsCaptionSegmentLengthControl": { "type": "string", - "documentation": "Set Caption segment length control (CaptionSegmentLengthControl) to Match video (MATCH_VIDEO) to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) to create caption segments that are 300 seconds long.", + "documentation": "Set Caption segment length control to Match video to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments to create caption segments that are 300 seconds long.", "enum": [ "LARGE_SEGMENTS", "MATCH_VIDEO" @@ -6704,7 +6704,7 @@ }, "HlsClientCache": { "type": "string", - "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header.", "enum": [ "DISABLED", "ENABLED" @@ -6720,7 +6720,7 @@ }, "HlsDescriptiveVideoServiceFlag": { "type": "string", - "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. 
When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", + "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", "enum": [ "DONT_FLAG", "FLAG" @@ -6799,7 +6799,7 @@ "AudioOnlyHeader": { "shape": "HlsAudioOnlyHeader", "locationName": "audioOnlyHeader", - "documentation": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include (INCLUDE), to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only headers from your audio segments." + "documentation": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include, to output audio-only headers. Choose Exclude to remove the audio-only headers from your audio segments." }, "BaseUrl": { "shape": "__string", @@ -6819,12 +6819,12 @@ "CaptionSegmentLengthControl": { "shape": "HlsCaptionSegmentLengthControl", "locationName": "captionSegmentLengthControl", - "documentation": "Set Caption segment length control (CaptionSegmentLengthControl) to Match video (MATCH_VIDEO) to create caption segments that align with the video segments from the first video output in this output group. 
For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) to create caption segments that are 300 seconds long." + "documentation": "Set Caption segment length control to Match video to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments to create caption segments that are 300 seconds long." }, "ClientCache": { "shape": "HlsClientCache", "locationName": "clientCache", - "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header." }, "CodecSpecification": { "shape": "HlsCodecSpecification", @@ -6834,7 +6834,7 @@ "Destination": { "shape": "__stringPatternS3", "locationName": "destination", - "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + "documentation": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. 
If your job has multiple inputs, the service uses the filename of the first input file." }, "DestinationSettings": { "shape": "DestinationSettings", @@ -6854,7 +6854,7 @@ "ImageBasedTrickPlay": { "shape": "HlsImageBasedTrickPlay", "locationName": "imageBasedTrickPlay", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" }, "ImageBasedTrickPlaySettings": { "shape": "HlsImageBasedTrickPlaySettings", @@ -6909,17 +6909,17 @@ "SegmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", - "documentation": "Specify the length, in whole seconds, of each segment. 
When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (HlsSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." + "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." }, "SegmentLengthControl": { "shape": "HlsSegmentLengthControl", "locationName": "segmentLengthControl", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." }, "SegmentsPerSubdirectory": { "shape": "__integerMin1Max2147483647", "locationName": "segmentsPerSubdirectory", - "documentation": "Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect." + "documentation": "Specify the number of segments to write to a subdirectory before starting a new one. 
You must also set Directory structure to Subdirectory per stream for this setting to have an effect." }, "StreamInfResolution": { "shape": "HlsStreamInfResolution", @@ -6934,12 +6934,12 @@ "TimedMetadataId3Frame": { "shape": "HlsTimedMetadataId3Frame", "locationName": "timedMetadataId3Frame", - "documentation": "Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame type to None (NONE)." + "documentation": "Specify the type of the ID3 frame to use for ID3 timestamps in your output. To include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to Passthrough. To exclude ID3 timestamps: Set ID3 timestamp frame type to None." }, "TimedMetadataId3Period": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "timedMetadataId3Period", - "documentation": "Specify the interval in seconds to write ID3 timestamps in your output. The first timestamp starts at the output timecode and date, and increases incrementally with each ID3 timestamp. To use the default interval of 10 seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp frame type (timedMetadataId3Frame) to PRIV (PRIV) or TDRL (TDRL), and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH)." + "documentation": "Specify the interval in seconds to write ID3 timestamps in your output. The first timestamp starts at the output timecode and date, and increases incrementally with each ID3 timestamp. To use the default interval of 10 seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp frame type to PRIV or TDRL, and set ID3 metadata to Passthrough." 
}, "TimestampDeltaMilliseconds": { "shape": "__integerMinNegative2147483648Max2147483647", @@ -6947,11 +6947,11 @@ "documentation": "Provides an extra millisecond delta offset to fine tune the timestamps." } }, - "documentation": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to HLS_GROUP_SETTINGS." + "documentation": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "HlsIFrameOnlyManifest": { "type": "string", - "documentation": "Choose Include (INCLUDE) to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE).", + "documentation": "Choose Include to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude.", "enum": [ "INCLUDE", "EXCLUDE" @@ -6959,7 +6959,7 @@ }, "HlsImageBasedTrickPlay": { "type": "string", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. 
Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", "enum": [ "NONE", "THUMBNAIL", @@ -7106,7 +7106,7 @@ }, "HlsSegmentLengthControl": { "type": "string", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. 
This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "enum": [ "EXACT", "GOP_MULTIPLE" @@ -7123,7 +7123,7 @@ "AudioOnlyContainer": { "shape": "HlsAudioOnlyContainer", "locationName": "audioOnlyContainer", - "documentation": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic (AUTOMATIC) to create an audio-only file in a raw container. Regardless of the value that you specify here, if this output has video, the service will place the output into an MPEG2-TS container." + "documentation": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic to create an audio-only file in a raw container. Regardless of the value that you specify here, if this output has video, the service will place the output into an MPEG2-TS container." }, "AudioRenditionSets": { "shape": "__string", @@ -7138,12 +7138,12 @@ "DescriptiveVideoServiceFlag": { "shape": "HlsDescriptiveVideoServiceFlag", "locationName": "descriptiveVideoServiceFlag", - "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." + "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. 
When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." }, "IFrameOnlyManifest": { "shape": "HlsIFrameOnlyManifest", "locationName": "iFrameOnlyManifest", - "documentation": "Choose Include (INCLUDE) to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE)." + "documentation": "Choose Include to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude." }, "SegmentModifier": { "shape": "__string", @@ -7171,7 +7171,7 @@ }, "HlsTimedMetadataId3Frame": { "type": "string", - "documentation": "Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame type to None (NONE).", + "documentation": "Specify the type of the ID3 frame to use for ID3 timestamps in your output. 
To include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to Passthrough. To exclude ID3 timestamps: Set ID3 timestamp frame type to None.",
 "enum": [
 "NONE",
 "PRIV",
@@ -7205,15 +7205,15 @@
 "Id3": {
 "shape": "__stringPatternAZaZ0902",
 "locationName": "id3",
- "documentation": "Use ID3 tag (Id3) to provide a fully formed ID3 tag in base64-encode format."
+ "documentation": "Use ID3 tag to provide a fully formed ID3 tag in base64-encoded format."
 },
 "Timecode": {
 "shape": "__stringPattern010920405090509092",
 "locationName": "timecode",
- "documentation": "Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format."
+ "documentation": "Provide a Timecode in HH:MM:SS:FF or HH:MM:SS;FF format."
 }
 },
- "documentation": "To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) to specify the base 64 encoded string and use Timecode (TimeCode) to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion (Id3Insertion)."
+ "documentation": "To insert ID3 tags in your output, specify two values. Use ID3 tag to specify the base 64 encoded string and use Timecode to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion."
 },
 "ImageInserter": {
 "type": "structure",
@@ -7253,7 +7253,7 @@
 "documentation": "Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. This option is available only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions."
 }
 },
- "documentation": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to IMSC." + "documentation": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." }, "ImscStylePassthrough": { "type": "string", @@ -7279,12 +7279,12 @@ "AudioSelectorGroups": { "shape": "__mapOfAudioSelectorGroup", "locationName": "audioSelectorGroups", - "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab (AudioDescription). Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." + "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." }, "AudioSelectors": { "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." + "documentation": "Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." 
}, "CaptionSelectors": { "shape": "__mapOfCaptionSelector", @@ -7294,12 +7294,12 @@ "Crop": { "shape": "Rectangle", "locationName": "crop", - "documentation": "Use Cropping selection (crop) to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection (crop)." + "documentation": "Use Cropping selection to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection." }, "DeblockFilter": { "shape": "InputDeblockFilter", "locationName": "deblockFilter", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." + "documentation": "Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." }, "DecryptionSettings": { "shape": "InputDecryptionSettings", @@ -7309,7 +7309,7 @@ "DenoiseFilter": { "shape": "InputDenoiseFilter", "locationName": "denoiseFilter", - "documentation": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." + "documentation": "Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." }, "DolbyVisionMetadataXml": { "shape": "__stringMin14PatternS3XmlXMLHttpsXmlXML", @@ -7319,7 +7319,7 @@ "FileInput": { "shape": "__stringPatternS3Https", "locationName": "fileInput", - "documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. 
If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs that contain assets referenced by the CPL." + "documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL." }, "FilterEnable": { "shape": "InputFilterEnable", @@ -7339,27 +7339,27 @@ "InputClippings": { "shape": "__listOfInputClipping", "locationName": "inputClippings", - "documentation": "(InputClippings) contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." + "documentation": "Contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." 
}, "InputScanType": { "shape": "InputScanType", "locationName": "inputScanType", - "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto (AUTO). Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." + "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." }, "Position": { "shape": "Rectangle", "locationName": "position", - "documentation": "Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement (position). If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior (scalingBehavior)." + "documentation": "Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black. 
If you specify a value here, it will override any value that you specify in the output setting Selection placement. If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD to Respond. If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior."
 },
 "ProgramNumber": {
 "shape": "__integerMin1Max2147483647",
 "locationName": "programNumber",
- "documentation": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default."
+ "documentation": "Use Program to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default."
 },
 "PsiControl": {
 "shape": "InputPsiControl",
 "locationName": "psiControl",
- "documentation": "Set PSI control (InputPsiControl) for transport stream inputs to specify which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI - Scan only PSI data."
+ "documentation": "Set PSI control for transport stream inputs to specify which data the demux process scans.\n* Ignore PSI - Scan all PIDs for audio and video.\n* Use PSI - Scan only PSI data."
 },
 "SupplementalImps": {
 "shape": "__listOf__stringPatternS3ASSETMAPXml",
 "locationName": "supplementalImps",
@@ -7369,12 +7369,12 @@
 "TimecodeSource": {
 "shape": "InputTimecodeSource",
 "locationName": "timecodeSource",
- "documentation": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. 
This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + "documentation": "Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "TimecodeStart": { "shape": "__stringMin11Max11Pattern01D20305D205D", "locationName": "timecodeStart", - "documentation": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + "documentation": "Specify the timecode that you want the service to use for this input's initial frame. 
To use this setting, you must set the Timecode source setting, located under the input settings, to Specified start. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "VideoGenerator": { "shape": "InputVideoGenerator", @@ -7395,19 +7395,19 @@ "EndTimecode": { "shape": "__stringPattern010920405090509092", "locationName": "endTimecode", - "documentation": "Set End timecode (EndTimecode) to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings (InputTimecodeSource). For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00." + "documentation": "Set End timecode to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00." }, "StartTimecode": { "shape": "__stringPattern010920405090509092", "locationName": "startTimecode", - "documentation": "Set Start timecode (StartTimecode) to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. 
Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00." + "documentation": "Set Start timecode to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00." } }, "documentation": "To transcode only portions of your input, include one input clip for each part of your input that you want in your output. All input clips that you specify will be included in every output of the job. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html." }, "InputDeblockFilter": { "type": "string", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.", + "documentation": "Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.", "enum": [ "ENABLED", "DISABLED" @@ -7441,7 +7441,7 @@ }, "InputDenoiseFilter": { "type": "string", - "documentation": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. 
Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.",
 "enum": [
 "ENABLED",
 "DISABLED"
@@ -7466,7 +7466,7 @@
 },
 "InputPsiControl": {
 "type": "string",
- "documentation": "Set PSI control (InputPsiControl) for transport stream inputs to specify which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI - Scan only PSI data.",
+ "documentation": "Set PSI control for transport stream inputs to specify which data the demux process scans.\n* Ignore PSI - Scan all PIDs for audio and video.\n* Use PSI - Scan only PSI data.",
 "enum": [
 "IGNORE_PSI",
 "USE_PSI"
@@ -7474,7 +7474,7 @@
 },
 "InputRotate": {
 "type": "string",
- "documentation": "Use Rotate (InputRotate) to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata.",
+ "documentation": "Use Rotate to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. 
The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata.", "enum": [ "DEGREE_0", "DEGREES_90", @@ -7485,7 +7485,7 @@ }, "InputSampleRange": { "type": "string", - "documentation": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow (FOLLOW), for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata.", + "documentation": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow, for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata.", "enum": [ "FOLLOW", "FULL_RANGE", @@ -7494,7 +7494,7 @@ }, "InputScanType": { "type": "string", - "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. 
Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto (AUTO). Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts.", + "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts.", "enum": [ "AUTO", "PSF" @@ -7516,12 +7516,12 @@ "AudioSelectorGroups": { "shape": "__mapOfAudioSelectorGroup", "locationName": "audioSelectorGroups", - "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab (AudioDescription). Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." + "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." }, "AudioSelectors": { "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." 
+ "documentation": "Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." }, "CaptionSelectors": { "shape": "__mapOfCaptionSelector", @@ -7531,17 +7531,17 @@ "Crop": { "shape": "Rectangle", "locationName": "crop", - "documentation": "Use Cropping selection (crop) to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection (crop)." + "documentation": "Use Cropping selection to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection." }, "DeblockFilter": { "shape": "InputDeblockFilter", "locationName": "deblockFilter", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." + "documentation": "Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." }, "DenoiseFilter": { "shape": "InputDenoiseFilter", "locationName": "denoiseFilter", - "documentation": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." + "documentation": "Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." 
}, "DolbyVisionMetadataXml": { "shape": "__stringMin14PatternS3XmlXMLHttpsXmlXML", @@ -7566,37 +7566,37 @@ "InputClippings": { "shape": "__listOfInputClipping", "locationName": "inputClippings", - "documentation": "(InputClippings) contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." + "documentation": "Contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." }, "InputScanType": { "shape": "InputScanType", "locationName": "inputScanType", - "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto (AUTO). Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." + "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. 
Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." }, "Position": { "shape": "Rectangle", "locationName": "position", - "documentation": "Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement (position). If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior (scalingBehavior)." + "documentation": "Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement. If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD to Respond. If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior." }, "ProgramNumber": { "shape": "__integerMin1Max2147483647", "locationName": "programNumber", - "documentation": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default." 
+ "documentation": "Use Program to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default."
 },
 "PsiControl": {
 "shape": "InputPsiControl",
 "locationName": "psiControl",
- "documentation": "Set PSI control (InputPsiControl) for transport stream inputs to specify which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI - Scan only PSI data."
+ "documentation": "Set PSI control for transport stream inputs to specify which data the demux process scans.\n* Ignore PSI - Scan all PIDs for audio and video.\n* Use PSI - Scan only PSI data."
 },
 "TimecodeSource": {
 "shape": "InputTimecodeSource",
 "locationName": "timecodeSource",
- "documentation": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode."
+ "documentation": "Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. 
Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "TimecodeStart": { "shape": "__stringMin11Max11Pattern01D20305D205D", "locationName": "timecodeStart", - "documentation": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + "documentation": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings, to Specified start. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "VideoSelector": { "shape": "VideoSelector", @@ -7608,7 +7608,7 @@ }, "InputTimecodeSource": { "type": "string", - "documentation": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). 
If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", + "documentation": "Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", "enum": [ "EMBEDDED", "ZEROBASED", @@ -7672,7 +7672,7 @@ }, "Opacity": { "shape": "__integerMin0Max100", "locationName": "opacity", - "documentation": "Use Opacity (Opacity) to specify how much of the underlying video shows through the inserted image. 0 is transparent and 100 is fully opaque. Default is 50." + "documentation": "Use Opacity to specify how much of the underlying video shows through the inserted image. 0 is transparent and 100 is fully opaque. Default is 50." }, "StartTime": { "shape": "__stringPattern01D20305D205D", @@ -7897,7 +7897,7 @@ "Inputs": { "shape": "__listOfInput", "locationName": "inputs", - "documentation": "Use Inputs (inputs) to define source file used in the transcode job. There can be multiple inputs add in a job. These inputs will be concantenated together to create the output." + "documentation": "Use Inputs to define the source file used in the transcode job. There can be multiple inputs added in a job. These inputs will be concatenated together to create the output." 
}, "KantarWatermark": { "shape": "KantarWatermarkSettings", @@ -7912,7 +7912,7 @@ "NielsenConfiguration": { "shape": "NielsenConfiguration", "locationName": "nielsenConfiguration", - "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting." + "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration, MediaConvert enables PCM to ID3 tagging for all outputs in the job." }, "NielsenNonLinearWatermark": { "shape": "NielsenNonLinearWatermarkSettings", @@ -7922,7 +7922,7 @@ "OutputGroups": { "shape": "__listOfOutputGroup", "locationName": "outputGroups", - "documentation": "(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" + "documentation": "Contains one group of settings for each set of outputs that share a common package type. 
All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required is a group of settings that apply to the whole group. This required object depends on the value you set for Type. Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" }, "TimecodeConfig": { "shape": "TimecodeConfig", @@ -7932,7 +7932,7 @@ "TimedMetadataInsertion": { "shape": "TimedMetadataInsertion", "locationName": "timedMetadataInsertion", - "documentation": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH)." + "documentation": "Insert user-defined custom ID3 metadata at timecodes that you specify. In each output that you want to include this metadata, you must set ID3 metadata to Passthrough." } }, "documentation": "JobSettings contains all the transcode settings for a job." @@ -8058,7 +8058,7 @@ "Inputs": { "shape": "__listOfInputTemplate", "locationName": "inputs", - "documentation": "Use Inputs (inputs) to define the source file used in the transcode job. There can only be one input in a job template. Using the API, you can include multiple inputs when referencing a job template." + "documentation": "Use Inputs to define the source file used in the transcode job. There can only be one input in a job template. Using the API, you can include multiple inputs when referencing a job template." }, "KantarWatermark": { "shape": "KantarWatermarkSettings", @@ -8073,7 +8073,7 @@ "NielsenConfiguration": { "shape": "NielsenConfiguration", "locationName": "nielsenConfiguration", - "documentation": "Settings for your Nielsen configuration. 
If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting." + "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration, MediaConvert enables PCM to ID3 tagging for all outputs in the job." }, "NielsenNonLinearWatermark": { "shape": "NielsenNonLinearWatermarkSettings", @@ -8083,7 +8083,7 @@ "OutputGroups": { "shape": "__listOfOutputGroup", "locationName": "outputGroups", - "documentation": "(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" + "documentation": "Contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required is a group of settings that apply to the whole group. This required object depends on the value you set for Type. Type, settings object pairs are as follows. 
* FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" }, "TimecodeConfig": { "shape": "TimecodeConfig", @@ -8093,7 +8093,7 @@ "TimedMetadataInsertion": { "shape": "TimedMetadataInsertion", "locationName": "timedMetadataInsertion", - "documentation": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH)." + "documentation": "Insert user-defined custom ID3 metadata at timecodes that you specify. In each output that you want to include this metadata, you must set ID3 metadata to Passthrough." } }, "documentation": "JobTemplateSettings contains all the transcode settings saved in the template that will be applied to jobs created from it." @@ -8595,7 +8595,7 @@ }, "M2tsAudioDuration": { "type": "string", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. 
When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "enum": [ "DEFAULT_CODEC_DURATION", "MATCH_VIDEO_DURATION" @@ -8611,7 +8611,7 @@ }, "M2tsDataPtsControl": { "type": "string", - "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values.", + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). 
Keep the default value to allow all PTS values.", "enum": [ "AUTO", "ALIGN_TO_VIDEO" @@ -8643,7 +8643,7 @@ }, "M2tsForceTsVideoEbpOrder": { "type": "string", - "documentation": "Keep the default value (DEFAULT) unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force (FORCE).", + "documentation": "Keep the default value unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force.", "enum": [ "FORCE", "DEFAULT" @@ -8690,11 +8690,11 @@ "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream generated by ESAM." } }, - "documentation": "Settings for SCTE-35 signals from ESAM. Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "documentation": "Settings for SCTE-35 signals from ESAM. Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." }, "M2tsScte35Source": { "type": "string", - "documentation": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam).", + "documentation": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. 
Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None. Also provide the ESAM XML as a string in the setting Signal processing notification XML. Also enable ESAM SCTE-35 (include the property scte35Esam).", "enum": [ "PASSTHROUGH", "NONE" @@ -8731,7 +8731,7 @@ "AudioDuration": { "shape": "M2tsAudioDuration", "locationName": "audioDuration", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. 
After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", @@ -8756,17 +8756,17 @@ "DataPTSControl": { "shape": "M2tsDataPtsControl", "locationName": "dataPTSControl", - "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values." + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value to allow all PTS values." }, "DvbNitSettings": { "shape": "DvbNitSettings", "locationName": "dvbNitSettings", - "documentation": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output." }, "DvbSdtSettings": { "shape": "DvbSdtSettings", "locationName": "dvbSdtSettings", - "documentation": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output. 
When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output." }, "DvbSubPids": { "shape": "__listOf__integerMin32Max8182", @@ -8776,7 +8776,7 @@ "DvbTdtSettings": { "shape": "DvbTdtSettings", "locationName": "dvbTdtSettings", - "documentation": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output." }, "DvbTeletextPid": { "shape": "__integerMin32Max8182", @@ -8801,7 +8801,7 @@ "ForceTsVideoEbpOrder": { "shape": "M2tsForceTsVideoEbpOrder", "locationName": "forceTsVideoEbpOrder", - "documentation": "Keep the default value (DEFAULT) unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force (FORCE)." + "documentation": "Keep the default value unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force." }, "FragmentTime": { "shape": "__doubleMin0", @@ -8846,7 +8846,7 @@ "PcrPid": { "shape": "__integerMin32Max8182", "locationName": "pcrPid", - "documentation": "Specify the packet identifier (PID) for the program clock reference (PCR) in this output. If you do not specify a value, the service will use the value for Video PID (VideoPid)." + "documentation": "Specify the packet identifier (PID) for the program clock reference (PCR) in this output. 
If you do not specify a value, the service will use the value for Video PID." }, "PmtInterval": { "shape": "__integerMin0Max1000", @@ -8866,7 +8866,7 @@ "ProgramNumber": { "shape": "__integerMin0Max65535", "locationName": "programNumber", - "documentation": "Use Program number (programNumber) to specify the program number used in the program map table (PMT) for this output. Default is 1. Program numbers and program map tables are parts of MPEG-2 transport stream containers, used for organizing data." + "documentation": "Use Program number to specify the program number used in the program map table (PMT) for this output. Default is 1. Program numbers and program map tables are parts of MPEG-2 transport stream containers, used for organizing data." }, "RateMode": { "shape": "M2tsRateMode", @@ -8876,7 +8876,7 @@ "Scte35Esam": { "shape": "M2tsScte35Esam", "locationName": "scte35Esam", - "documentation": "Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "documentation": "Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." }, "Scte35Pid": { "shape": "__integerMin32Max8182", @@ -8886,7 +8886,7 @@ "Scte35Source": { "shape": "M2tsScte35Source", "locationName": "scte35Source", - "documentation": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam)." 
+ "documentation": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None. Also provide the ESAM XML as a string in the setting Signal processing notification XML. Also enable ESAM SCTE-35 (include the property scte35Esam)." }, "SegmentationMarkers": { "shape": "M2tsSegmentationMarkers", @@ -8919,11 +8919,11 @@ "documentation": "Specify the packet identifier (PID) of the elementary video stream in the transport stream." } }, - "documentation": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." + "documentation": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. 
Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." }, "M3u8AudioDuration": { "type": "string", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. 
For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "enum": [ "DEFAULT_CODEC_DURATION", "MATCH_VIDEO_DURATION" @@ -8931,7 +8931,7 @@ }, "M3u8DataPtsControl": { "type": "string", - "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values.", + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value AUTO to allow all PTS values.", "enum": [ "AUTO", "ALIGN_TO_VIDEO" @@ -8955,7 +8955,7 @@ }, "M3u8Scte35Source": { "type": "string", - "documentation": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose Ad markers (adMarkers) if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml).", + "documentation": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None if you don't want manifest conditioning. 
Choose Passthrough and choose Ad markers if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML.", "enum": [ "PASSTHROUGH", "NONE" @@ -8967,7 +8967,7 @@ "AudioDuration": { "shape": "M3u8AudioDuration", "locationName": "audioDuration", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. 
MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", @@ -8982,7 +8982,7 @@ "DataPTSControl": { "shape": "M3u8DataPtsControl", "locationName": "dataPTSControl", - "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values." + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value AUTO to allow all PTS values." }, "MaxPcrInterval": { "shape": "__integerMin0Max500", @@ -9037,12 +9037,12 @@ "Scte35Source": { "shape": "M3u8Scte35Source", "locationName": "scte35Source", - "documentation": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose Ad markers (adMarkers) if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml)." 
+ "documentation": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None if you don't want manifest conditioning. Choose Passthrough and choose Ad markers if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML." }, "TimedMetadata": { "shape": "TimedMetadata", "locationName": "timedMetadata", - "documentation": "Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: set ID3 metadata to None (NONE) or leave blank." + "documentation": "Set ID3 metadata to Passthrough to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period, and Custom ID3 metadata inserter. To exclude this ID3 metadata in this output: set ID3 metadata to None or leave blank." }, "TimedMetadataPid": { "shape": "__integerMin32Max8182", @@ -9125,7 +9125,7 @@ "StartTime": { "shape": "__stringMin11Max11Pattern01D20305D205D", "locationName": "startTime", - "documentation": "Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF or HH:MM:SS;FF). Make sure that the timecode you provide here takes into account how you have set up your timecode configuration under both job settings and input settings. The simplest way to do that is to set both to start at 0. If you need to set up your job to follow timecodes embedded in your source that don't start at zero, make sure that you specify a start time that is after the first embedded timecode. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html Find job-wide and input timecode configuration settings in your JSON job settings specification at settings>timecodeConfig>source and settings>inputs>timecodeSource." + "documentation": "Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF or HH:MM:SS;FF). Make sure that the timecode you provide here takes into account how you have set up your timecode configuration under both job settings and input settings. The simplest way to do that is to set both to start at 0. If you need to set up your job to follow timecodes embedded in your source that don't start at zero, make sure that you specify a start time that is after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html" } }, "documentation": "Overlay motion graphics on top of your video. The motion graphics that you specify here appear on all outputs in all output groups. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html." @@ -9260,7 +9260,7 @@ "Channels": { "shape": "__integerMin1Max2", "locationName": "channels", - "documentation": "Set Channels to specify the number of channels in this output audio track. Choosing Mono in the console will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2." + "documentation": "Set Channels to specify the number of channels in this output audio track. Choosing Mono in will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2." }, "SampleRate": { "shape": "__integerMin32000Max48000", @@ -9268,7 +9268,7 @@ "documentation": "Sample rate in hz." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value MP2." + "documentation": "Required when you set Codec to the value MP2." 
}, "Mp3RateControlMode": { "type": "string", @@ -9289,7 +9289,7 @@ "Channels": { "shape": "__integerMin1Max2", "locationName": "channels", - "documentation": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2." + "documentation": "Specify the number of channels in this output audio track. Choosing Mono gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2." }, "RateControlMode": { "shape": "Mp3RateControlMode", @@ -9304,7 +9304,7 @@ "VbrQuality": { "shape": "__integerMin0Max9", "locationName": "vbrQuality", - "documentation": "Required when you set Bitrate control mode (rateControlMode) to VBR. Specify the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest quality)." + "documentation": "Required when you set Bitrate control mode to VBR. Specify the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest quality)." } }, "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3." @@ -9339,7 +9339,7 @@ "AudioDuration": { "shape": "CmfcAudioDuration", "locationName": "audioDuration", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. 
MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, "CslgAtom": { "shape": "Mp4CslgAtom", @@ -9349,7 +9349,7 @@ "CttsVersion": { "shape": "__integerMin0Max1", "locationName": "cttsVersion", - "documentation": "Ignore this setting unless compliance to the CTTS box version specification matters in your workflow. Specify a value of 1 to set your CTTS box version to 1 and make your output compliant with the specification. When you specify a value of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE. Keep the default value 0 to set your CTTS box version to 0. This can provide backward compatibility for some players and packagers." + "documentation": "Ignore this setting unless compliance to the CTTS box version specification matters in your workflow. 
Specify a value of 1 to set your CTTS box version to 1 and make your output compliant with the specification. When you specify a value of 1, you must also set CSLG atom to the value INCLUDE. Keep the default value 0 to set your CTTS box version to 0. This can provide backward compatibility for some players and packagers." }, "FreeSpaceBox": { "shape": "Mp4FreeSpaceBox", @@ -9371,7 +9371,7 @@ }, "MpdAccessibilityCaptionHints": { "type": "string", - "documentation": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest withCreates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.
Deleting Resource Record Sets
To delete a resource record set, you must specify all the same values that you specified when you created it.
Change Batches and Transactional Changes
The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.
For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.
If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.
Traffic Flow
To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.
Create, Delete, and Upsert
Use ChangeResourceRecordsSetsRequest to perform the following actions:
CREATE: Creates a resource record set that has the specified values.
DELETE: Deletes an existing resource record set that has the specified values.
UPSERT: If a resource set exists Route 53 updates it with the values in the request.
Syntaxes for Creating, Updating, and Deleting Resource Record Sets
The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.
For an example for each type of resource record set, see \"Examples.\"
Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.
Change Propagation to Route 53 DNS Servers
When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers within 60 seconds. For more information, see GetChange.
Limits on ChangeResourceRecordSets Requests
For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.
Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.
Deleting Resource Record Sets
To delete a resource record set, you must specify all the same values that you specified when you created it.
Change Batches and Transactional Changes
The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.
For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.
If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.
Traffic Flow
To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.
Create, Delete, and Upsert
Use ChangeResourceRecordsSetsRequest to perform the following actions:
CREATE: Creates a resource record set that has the specified values.
DELETE: Deletes an existing resource record set that has the specified values.
UPSERT: If a resource set exists Route 53 updates it with the values in the request.
Syntaxes for Creating, Updating, and Deleting Resource Record Sets
The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.
For an example for each type of resource record set, see \"Examples.\"
Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.
Change Propagation to Route 53 DNS Servers
When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers managing the hosted zone. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers managing the hosted zone within 60 seconds. For more information, see GetChange.
Limits on ChangeResourceRecordSets Requests
For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.
Creates resource record sets in a specified hosted zone based on the settings in a specified traffic policy version. In addition, CreateTrafficPolicyInstance associates the resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created.
Creates resource record sets in a specified hosted zone based on the settings in a specified traffic policy version. In addition, CreateTrafficPolicyInstance associates the resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created.
After you submit an CreateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. Use GetTrafficPolicyInstance with the id of new traffic policy instance to confirm that the CreateTrafficPolicyInstance request completed successfully. For more information, see the State response element.
Returns the current status of a change batch request. The status is one of the following values:
PENDING indicates that the changes in this request have not propagated to all Amazon Route 53 DNS servers. This is the initial status of all change batch requests.
INSYNC indicates that the changes have propagated to all Route 53 DNS servers.
Returns the current status of a change batch request. The status is one of the following values:
PENDING indicates that the changes in this request have not propagated to all Amazon Route 53 DNS servers managing the hosted zone. This is the initial status of all change batch requests.
INSYNC indicates that the changes have propagated to all Route 53 DNS servers managing the hosted zone.
Gets information about a specified traffic policy instance.
After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.
In the Route 53 console, traffic policy instances are known as policy records.
Gets information about a specified traffic policy instance.
Use GetTrafficPolicyInstance with the id of new traffic policy instance to confirm that the CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request completed successfully. For more information, see the State response element.
In the Route 53 console, traffic policy instances are known as policy records.
Gets the value that Amazon Route 53 returns in response to a DNS request for a specified record name and type. You can optionally specify the IP address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask.
This call only supports querying public hosted zones.
" + "documentation":"Gets the value that Amazon Route 53 returns in response to a DNS request for a specified record name and type. You can optionally specify the IP address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask.
This call only supports querying public hosted zones.
The TestDnsAnswer returns information similar to what you would expect from the answer section of the dig command. Therefore, if you query for the name servers of a subdomain that point to the parent name servers, those will not be returned.
Updates the resource record sets in a specified hosted zone that were created based on the settings in a specified traffic policy version.
When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS queries for the root resource record set name (such as example.com) while it replaces one group of resource record sets with another. Route 53 performs the following operations:
Route 53 creates a new group of resource record sets based on the specified traffic policy. This is true regardless of how significant the differences are between the existing resource record sets and the new resource record sets.
When all of the new resource record sets have been created, Route 53 starts to respond to DNS queries for the root resource record set name (such as example.com) by using the new resource record sets.
Route 53 deletes the old group of resource record sets that are associated with the root resource record set name.
After you submit a UpdateTrafficPolicyInstance request, there's a brief delay while Route 53 creates the resource record sets that are specified in the traffic policy definition. Use GetTrafficPolicyInstance with the id of updated traffic policy instance confirm that the UpdateTrafficPolicyInstance request completed successfully. For more information, see the State response element.
Updates the resource record sets in a specified hosted zone that were created based on the settings in a specified traffic policy version.
When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS queries for the root resource record set name (such as example.com) while it replaces one group of resource record sets with another. Route 53 performs the following operations:
Route 53 creates a new group of resource record sets based on the specified traffic policy. This is true regardless of how significant the differences are between the existing resource record sets and the new resource record sets.
When all of the new resource record sets have been created, Route 53 starts to respond to DNS queries for the root resource record set name (such as example.com) by using the new resource record sets.
Route 53 deletes the old group of resource record sets that are associated with the root resource record set name.
If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set when you created it. For more information about reusable delegation sets, see CreateReusableDelegationSet.
" + "documentation":"If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set when you created it. For more information about reusable delegation sets, see CreateReusableDelegationSet.
If you are using a reusable delegation set to create a public hosted zone for a subdomain, make sure that the parent hosted zone doesn't use one or more of the same name servers. If you have overlapping nameservers, the operation will cause a ConflictingDomainsExist error.
A complex type that contains information about the request to create a public or private hosted zone.
" From 4453641766e5c93b8b469d55faf4e2f21b963592 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:24:56 +0000 Subject: [PATCH 041/270] AWS Cloud Control API Update: Updates the documentation for CreateResource. --- .../feature-AWSCloudControlAPI-6b1f7c2.json | 6 + .../codegen-resources/endpoint-rule-set.json | 399 ++--- .../codegen-resources/endpoint-tests.json | 1319 ++++------------- .../codegen-resources/service-2.json | 2 +- 4 files changed, 473 insertions(+), 1253 deletions(-) create mode 100644 .changes/next-release/feature-AWSCloudControlAPI-6b1f7c2.json diff --git a/.changes/next-release/feature-AWSCloudControlAPI-6b1f7c2.json b/.changes/next-release/feature-AWSCloudControlAPI-6b1f7c2.json new file mode 100644 index 000000000000..6199423b4571 --- /dev/null +++ b/.changes/next-release/feature-AWSCloudControlAPI-6b1f7c2.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Cloud Control API", + "contributor": "", + "description": "Updates the documentation for CreateResource." 
+} diff --git a/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-rule-set.json index e215956866ae..1812986e28a7 100644 --- a/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, 
- { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudcontrolapi-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://cloudcontrolapi-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudcontrolapi-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://cloudcontrolapi-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 
+336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-tests.json index ab39a6b29bf7..d21e7072890e 100644 --- a/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,276 +1,133 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - 
"UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-2", 
- "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.eu-south-2.amazonaws.com" + "url": "https://cloudcontrolapi.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": false + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-gov-east-1.api.aws" + "url": "https://cloudcontrolapi.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-gov-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": 
"ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-gov-east-1.api.aws" + "url": "https://cloudcontrolapi.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-gov-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.me-central-1.api.aws" + "url": "https://cloudcontrolapi.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.me-central-1.amazonaws.com" + "url": "https://cloudcontrolapi.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true + "Region": 
"ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.me-central-1.api.aws" + "url": "https://cloudcontrolapi.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.me-central-1.amazonaws.com" + "url": "https://cloudcontrolapi.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ca-central-1.api.aws" + "url": "https://cloudcontrolapi.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, "Region": "ca-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { @@ -281,74 +138,9 @@ } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack 
disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "UseFIPS": true, + "UseDualStack": false } }, { @@ -359,1114 +151,495 @@ } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - 
"Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - 
{ - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with 
FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - 
"endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { 
- "url": "https://cloudcontrolapi.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": 
{ - "endpoint": { - "url": "https://cloudcontrolapi-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.sa-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-east-1.api.aws" + "url": "https://cloudcontrolapi.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 
with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-east-1.api.aws" + "url": "https://cloudcontrolapi.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://cloudcontrolapi.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.cn-north-1.amazonaws.com.cn" + "url": "https://cloudcontrolapi.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For 
region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://cloudcontrolapi.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.cn-north-1.amazonaws.com.cn" + "url": "https://cloudcontrolapi-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-gov-west-1.api.aws" + "url": "https://cloudcontrolapi.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-gov-west-1.amazonaws.com" + "url": "https://cloudcontrolapi-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-gov-west-1.api.aws" + "url": "https://cloudcontrolapi.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-gov-west-1.amazonaws.com" + "url": "https://cloudcontrolapi-fips.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-1.api.aws" + "url": "https://cloudcontrolapi.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-1.amazonaws.com" + "url": "https://cloudcontrolapi-fips.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { 
- "url": "https://cloudcontrolapi.ap-southeast-1.api.aws" + "url": "https://cloudcontrolapi-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-1.amazonaws.com" + "url": "https://cloudcontrolapi.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-2.api.aws" + "url": "https://cloudcontrolapi.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-2.amazonaws.com" + "url": "https://cloudcontrolapi.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://cloudcontrolapi.ap-southeast-2.api.aws" + "url": "https://cloudcontrolapi-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-2.amazonaws.com" + "url": "https://cloudcontrolapi-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-3.api.aws" + "url": "https://cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-3.amazonaws.com" + "url": "https://cloudcontrolapi.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-3.api.aws" + "url": "https://cloudcontrolapi-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-3.amazonaws.com" + "url": "https://cloudcontrolapi.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-east-1.api.aws" + "url": "https://cloudcontrolapi-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-east-1.amazonaws.com" + "url": "https://cloudcontrolapi-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://cloudcontrolapi.us-east-1.api.aws" + "url": "https://cloudcontrolapi.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-east-2.api.aws" + "url": "https://cloudcontrolapi-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://cloudcontrolapi.us-east-2.api.aws" + "url": "https://cloudcontrolapi.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-east-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://cloudcontrolapi-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled 
and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://cloudcontrolapi.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1476,9 +649,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1488,11 +661,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cloudcontrol/src/main/resources/codegen-resources/service-2.json 
b/services/cloudcontrol/src/main/resources/codegen-resources/service-2.json index b59888f2c053..9e34fe763130 100644 --- a/services/cloudcontrol/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudcontrol/src/main/resources/codegen-resources/service-2.json @@ -286,7 +286,7 @@ }, "DesiredState":{ "shape":"Properties", - "documentation":"Structured data format representing the desired state of the resource, consisting of that resource's properties and their desired values.
Cloud Control API currently supports JSON as a structured data format.
<p>Specify the desired state as one of the following:</p> <ul> <li> <p>A JSON blob</p> </li> <li> <p>A local path containing the desired state in JSON data format</p> </li> </ul> <p>For more information, see <a href="https://docs.aws.amazon.com/cloudcontrolapi/latest/userguide/resource-operations-create.html#resource-operations-create-desiredstate">Composing the desired state of the resource</a> in the <i>Amazon Web Services Cloud Control API User Guide</i>.</p> <p>For more information about the properties of a specific resource, refer to the related topic for the resource in the <a href="https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html">Resource and property types reference</a> in the <i>CloudFormation Users Guide</i>.</p> "
+ "documentation":"Structured data format representing the desired state of the resource, consisting of that resource's properties and their desired values.
Cloud Control API currently supports JSON as a structured data format.
Specify the desired state as one of the following:
A JSON blob
A local path containing the desired state in JSON data format
For more information, see Composing the desired state of the resource in the Amazon Web Services Cloud Control API User Guide.
For more information about the properties of a specific resource, refer to the related topic for the resource in the Resource and property types reference in the CloudFormation Users Guide.
" } } }, From 5d22a963dc8d7524ea75f962f39fc37218af9f1d Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:24:48 +0000 Subject: [PATCH 042/270] AWS EntityResolution Update: AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information. --- .../feature-AWSEntityResolution-f6e7aec.json | 6 + services/entityresolution/pom.xml | 60 + .../codegen-resources/endpoint-rule-set.json | 350 ++++ .../codegen-resources/endpoint-tests.json | 295 ++++ .../codegen-resources/paginators-1.json | 22 + .../codegen-resources/service-2.json | 1543 +++++++++++++++++ 6 files changed, 2276 insertions(+) create mode 100644 .changes/next-release/feature-AWSEntityResolution-f6e7aec.json create mode 100644 services/entityresolution/pom.xml create mode 100644 services/entityresolution/src/main/resources/codegen-resources/endpoint-rule-set.json create mode 100644 services/entityresolution/src/main/resources/codegen-resources/endpoint-tests.json create mode 100644 services/entityresolution/src/main/resources/codegen-resources/paginators-1.json create mode 100644 services/entityresolution/src/main/resources/codegen-resources/service-2.json diff --git a/.changes/next-release/feature-AWSEntityResolution-f6e7aec.json b/.changes/next-release/feature-AWSEntityResolution-f6e7aec.json new file mode 100644 index 000000000000..a391190e125e --- /dev/null +++ b/.changes/next-release/feature-AWSEntityResolution-f6e7aec.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS EntityResolution", + "contributor": "", + "description": "AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information." 
+} diff --git a/services/entityresolution/pom.xml b/services/entityresolution/pom.xml new file mode 100644 index 000000000000..93e0e30073ab --- /dev/null +++ b/services/entityresolution/pom.xml @@ -0,0 +1,60 @@ + + + +Creates a MatchingWorkflow object which stores the configuration of the data processing job to be run. It is important to note that there should not be a pre-existing MatchingWorkflow with the same name. To modify an existing workflow, utilize the UpdateMatchingWorkflow API.
Creates a schema mapping, which defines the schema of the input customer records table. The SchemaMapping also provides Entity Resolution with some metadata about the table, such as the attribute types of the columns and which columns to match on.
Deletes the MatchingWorkflow with a given name. This operation will succeed even if a workflow with the given name does not exist.
Deletes the SchemaMapping with a given name. This operation will succeed even if a schema with the given name does not exist. This operation will fail if there is a DataIntegrationWorkflow object that references the SchemaMapping in the workflow's InputSourceConfig.
Returns the corresponding Match ID of a customer record if the record has been processed.
" + }, + "GetMatchingJob":{ + "name":"GetMatchingJob", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows/{workflowName}/jobs/{jobId}", + "responseCode":200 + }, + "input":{"shape":"GetMatchingJobInput"}, + "output":{"shape":"GetMatchingJobOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"Gets the status, metrics, and errors (if there are any) that are associated with a job.
" + }, + "GetMatchingWorkflow":{ + "name":"GetMatchingWorkflow", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows/{workflowName}", + "responseCode":200 + }, + "input":{"shape":"GetMatchingWorkflowInput"}, + "output":{"shape":"GetMatchingWorkflowOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"Returns the MatchingWorkflow with a given name, if it exists.
Returns the SchemaMapping of a given name.
" + }, + "ListMatchingJobs":{ + "name":"ListMatchingJobs", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows/{workflowName}/jobs", + "responseCode":200 + }, + "input":{"shape":"ListMatchingJobsInput"}, + "output":{"shape":"ListMatchingJobsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"Lists all jobs for a given workflow.
" + }, + "ListMatchingWorkflows":{ + "name":"ListMatchingWorkflows", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows", + "responseCode":200 + }, + "input":{"shape":"ListMatchingWorkflowsInput"}, + "output":{"shape":"ListMatchingWorkflowsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"Returns a list of all the MatchingWorkflows that have been created for an AWS account.
Returns a list of all the SchemaMappings that have been created for an AWS account.
Displays the tags associated with an AWS Entity Resolution resource. In Entity Resolution, SchemaMapping, and MatchingWorkflow can be tagged.
Starts the MatchingJob of a workflow. The workflow must have previously been created using the CreateMatchingWorkflow endpoint.
Assigns one or more tags (key-value pairs) to the specified AWS Entity Resolution resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In Entity Resolution, SchemaMapping, and MatchingWorkflow can be tagged. Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters. You can use the TagResource action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
Removes one or more tags from the specified AWS Entity Resolution resource. In Entity Resolution, SchemaMapping, and MatchingWorkflow can be tagged.
Updates an existing MatchingWorkflow. This method is identical to CreateMatchingWorkflow, except it uses an HTTP PUT request instead of a POST request, and the MatchingWorkflow must already exist for the method to succeed.
You do not have sufficient access to perform this action. HTTP Status Code: 403
The request could not be processed because of conflict in the current state of the resource. Example: Workflow already exists, Schema already exists, Workflow is currently running, etc. HTTP Status Code: 400
A description of the workflow.
" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"An object which defines an incremental run type and has only incrementalRunType as a field.
A list of InputSource objects, which have the fields InputSourceARN and SchemaName.
A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.
An object which defines the resolutionType and the ruleBasedProperties
The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.
" + }, + "tags":{ + "shape":"TagMap", + "documentation":"The tags used to organize, track, or control access for this resource.
" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"The name of the workflow. There cannot be multiple DataIntegrationWorkflows with the same name.
A description of the workflow.
" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"An object which defines an incremental run type and has only incrementalRunType as a field.
A list of InputSource objects, which have the fields InputSourceARN and SchemaName.
A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.
An object which defines the resolutionType and the ruleBasedProperties
The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.
" + }, + "workflowArn":{ + "shape":"MatchingWorkflowArn", + "documentation":"The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.
The name of the workflow.
" + } + } + }, + "CreateSchemaMappingInput":{ + "type":"structure", + "required":["schemaName"], + "members":{ + "description":{ + "shape":"Description", + "documentation":"A description of the schema.
" + }, + "mappedInputFields":{ + "shape":"SchemaInputAttributes", + "documentation":"A list of MappedInputFields. Each MappedInputField corresponds to a column the source data table, and contains column name plus additional information that Entity Resolution uses for matching.
The name of the schema. There cannot be multiple SchemaMappings with the same name.
The tags used to organize, track, or control access for this resource.
" + } + } + }, + "CreateSchemaMappingOutput":{ + "type":"structure", + "required":[ + "description", + "mappedInputFields", + "schemaArn", + "schemaName" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"A description of the schema.
" + }, + "mappedInputFields":{ + "shape":"SchemaInputAttributes", + "documentation":"A list of MappedInputFields. Each MappedInputField corresponds to a column the source data table, and contains column name plus additional information that Entity Resolution uses for matching.
The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.
The name of the schema.
" + } + } + }, + "DeleteMatchingWorkflowInput":{ + "type":"structure", + "required":["workflowName"], + "members":{ + "workflowName":{ + "shape":"EntityName", + "documentation":"The name of the workflow to be retrieved.
", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "DeleteMatchingWorkflowOutput":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"A successful operation message.
" + } + } + }, + "DeleteSchemaMappingInput":{ + "type":"structure", + "required":["schemaName"], + "members":{ + "schemaName":{ + "shape":"EntityName", + "documentation":"The name of the schema to delete.
", + "location":"uri", + "locationName":"schemaName" + } + } + }, + "DeleteSchemaMappingOutput":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"A successful operation message.
" + } + } + }, + "Description":{ + "type":"string", + "max":255, + "min":0 + }, + "EntityName":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9-]*$" + }, + "ErrorDetails":{ + "type":"structure", + "members":{ + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"The error message from the job, if there is one.
" + } + }, + "documentation":"An object containing an error message, if there was an error.
" + }, + "ErrorMessage":{ + "type":"string", + "max":2048, + "min":1 + }, + "ExceedsLimitException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"The request was rejected because it attempted to create resources beyond the current AWS Entity Resolution account limits. The error message describes the limit exceeded. HTTP Status Code: 402
The record to fetch the Match ID for.
" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"The name of the workflow.
", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "GetMatchIdOutput":{ + "type":"structure", + "members":{ + "matchId":{ + "shape":"String", + "documentation":"The unique identifiers for this group of match records.
" + } + } + }, + "GetMatchingJobInput":{ + "type":"structure", + "required":[ + "jobId", + "workflowName" + ], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"The ID of the job.
", + "location":"uri", + "locationName":"jobId" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"The name of the workflow.
", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "GetMatchingJobOutput":{ + "type":"structure", + "required":[ + "jobId", + "startTime", + "status" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"The time at which the job has finished.
" + }, + "errorDetails":{ + "shape":"ErrorDetails", + "documentation":"An object containing an error message, if there was an error.
" + }, + "jobId":{ + "shape":"JobId", + "documentation":"The ID of the job.
" + }, + "metrics":{ + "shape":"JobMetrics", + "documentation":"Metrics associated with the execution, specifically total records processed, unique IDs generated, and records the execution skipped.
" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"The time at which the job was started.
" + }, + "status":{ + "shape":"JobStatus", + "documentation":"The current status of the job. Either running, succeeded, queued, or failed.
The name of the workflow.
", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "GetMatchingWorkflowOutput":{ + "type":"structure", + "required":[ + "createdAt", + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "updatedAt", + "workflowArn", + "workflowName" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the workflow was created.
" + }, + "description":{ + "shape":"Description", + "documentation":"A description of the workflow.
" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"An object which defines an incremental run type and has only incrementalRunType as a field.
A list of InputSource objects, which have the fields InputSourceARN and SchemaName.
A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.
An object which defines the resolutionType and the ruleBasedProperties
The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to access resources on your behalf.
" + }, + "tags":{ + "shape":"TagMap", + "documentation":"The tags used to organize, track, or control access for this resource.
" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the workflow was last updated.
" + }, + "workflowArn":{ + "shape":"MatchingWorkflowArn", + "documentation":"The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.
The name of the workflow.
" + } + } + }, + "GetSchemaMappingInput":{ + "type":"structure", + "required":["schemaName"], + "members":{ + "schemaName":{ + "shape":"EntityName", + "documentation":"The name of the schema to be retrieved.
", + "location":"uri", + "locationName":"schemaName" + } + } + }, + "GetSchemaMappingOutput":{ + "type":"structure", + "required":[ + "createdAt", + "mappedInputFields", + "schemaArn", + "schemaName", + "updatedAt" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the SchemaMapping was created.
A description of the schema.
" + }, + "mappedInputFields":{ + "shape":"SchemaInputAttributes", + "documentation":"A list of MappedInputFields. Each MappedInputField corresponds to a column the source data table, and contains column name plus additional information Venice uses for matching.
The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.
" + }, + "schemaName":{ + "shape":"EntityName", + "documentation":"The name of the schema.
" + }, + "tags":{ + "shape":"TagMap", + "documentation":"The tags used to organize, track, or control access for this resource.
" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the SchemaMapping was last updated.
The type of incremental run. It takes only one value: IMMEDIATE.
An object which defines an incremental run type and has only incrementalRunType as a field.
Normalizes the attributes defined in the schema in the input data. For example, if an attribute has an AttributeType of PHONE_NUMBER, and the data in the input table is in a format of 1234567890, Entity Resolution will normalize this field in the output to (123)-456-7890.
An Glue table ARN for the input source table.
" + }, + "schemaName":{ + "shape":"EntityName", + "documentation":"The name of the schema to be retrieved.
" + } + }, + "documentation":"An object containing InputSourceARN, SchemaName, and ApplyNormalization.
This exception occurs when there is an internal failure in the AWS Entity Resolution service. HTTP Status Code: 500
The total number of input records.
" + }, + "matchIDs":{ + "shape":"Integer", + "documentation":"The total number of matchIDs generated.
The total number of records that did not get processed,
" + }, + "totalRecordsProcessed":{ + "shape":"Integer", + "documentation":"The total number of records processed.
" + } + }, + "documentation":"An object containing InputRecords, TotalRecordsProcessed, MatchIDs, and RecordsNotProcessed.
The time at which the job has finished.
" + }, + "jobId":{ + "shape":"JobId", + "documentation":"The ID of the job.
" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"The time at which the job was started.
" + }, + "status":{ + "shape":"JobStatus", + "documentation":"The current status of the job. Either running, succeeded, queued, or failed.
An object containing the JobId, Status, StartTime, and EndTime of a job.
The maximum number of objects returned per page.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The pagination token from the previous ListSchemaMappings API call.
The name of the workflow to be retrieved.
", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "ListMatchingJobsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListMatchingJobsOutput":{ + "type":"structure", + "members":{ + "jobs":{ + "shape":"JobList", + "documentation":"A list of JobSummary objects, each of which contain the ID, status, start time, and end time of a job.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The pagination token from the previous ListSchemaMappings API call.
The maximum number of objects returned per page.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The pagination token from the previous ListSchemaMappings API call.
The pagination token from the previous ListSchemaMappings API call.
A list of MatchingWorkflowSummary objects, each of which contain the fields WorkflowName, WorkflowArn, CreatedAt, and UpdatedAt.
The maximum number of objects returned per page.
", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"The pagination token from the previous ListSchemaMappings API call.
The pagination token from the previous ListDomains API call.
A list of SchemaMappingSummary objects, each of which contain the fields SchemaName, SchemaArn, CreatedAt, UpdatedAt.
The ARN of the resource for which you want to view tags.
", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"The tags used to organize, track, or control access for this resource.
" + } + } + }, + "MatchingWorkflowArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-us-gov|aws-cn):entityresolution:.*:[0-9]+:(matchingworkflow/.*)$" + }, + "MatchingWorkflowList":{ + "type":"list", + "member":{"shape":"MatchingWorkflowSummary"} + }, + "MatchingWorkflowSummary":{ + "type":"structure", + "required":[ + "createdAt", + "updatedAt", + "workflowArn", + "workflowName" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the workflow was created.
" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the workflow was last updated.
" + }, + "workflowArn":{ + "shape":"MatchingWorkflowArn", + "documentation":"The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.
The name of the workflow.
" + } + }, + "documentation":"A list of MatchingWorkflowSummary objects, each of which contain the fields WorkflowName, WorkflowArn, CreatedAt, UpdatedAt.
Enables the ability to hash the column values in the output.
" + }, + "name":{ + "shape":"AttributeName", + "documentation":"A name of a column to be written to the output. This must be an InputField name in the schema mapping.
A list of OutputAttribute objects, each of which have the fields Name and Hashed. Each of these objects selects a column to be included in the output table, and whether the values of the column should be hashed.
Customer KMS ARN for encryption at rest. If not provided, system will use an Entity Resolution managed KMS key.
" + }, + "applyNormalization":{ + "shape":"Boolean", + "documentation":"Normalizes the attributes defined in the schema in the input data. For example, if an attribute has an AttributeType of PHONE_NUMBER, and the data in the input table is in a format of 1234567890, Entity Resolution will normalize this field in the output to (123)-456-7890.
A list of OutputAttribute objects, each of which have the fields Name and Hashed. Each of these objects selects a column to be included in the output table, and whether the values of the column should be hashed.
The S3 path to which Entity Resolution will write the output table.
" + } + }, + "documentation":"A list of OutputAttribute objects, each of which have the fields Name and Hashed. Each of these objects selects a column to be included in the output table, and whether the values of the column should be hashed.
There are two types of matching, RULE_MATCHING and ML_MATCHING
An object which defines the list of matching rules to run and has a field Rules, which is a list of rule objects.
An object which defines the resolutionType and the ruleBasedProperties
The resource could not be found. HTTP Status Code: 404
A list of MatchingKeys. The MatchingKeys must have been defined in the SchemaMapping. Two records are considered to match according to this rule if all of the MatchingKeys match.
A name for the matching rule.
" + } + }, + "documentation":"An object containing RuleName, and MatchingKeys.
You can either choose ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel. When choosing MANY_TO_MANY, the system can match attribute across the sub-types of an attribute type. For example, if the value of the Email field of Profile A and the value of BusinessEmail field of Profile B matches, the two profiles are matched on the Email type. When choosing ONE_TO_ONE the system can only match if the sub-types are exact matches. For example, only when the value of the Email field of Profile A and the value of the Email field of Profile B matches, the two profiles are matched on the Email type.
A list of Rule objects, each of which have fields RuleName and MatchingKeys.
An object which defines the list of matching rules to run and has a field Rules, which is a list of rule objects.
A string containing the field name.
" + }, + "groupName":{ + "shape":"AttributeName", + "documentation":"Instruct Entity Resolution to combine several columns into a unified column with the identical attribute type. For example, when working with columns such as first_name, middle_name, and last_name, assigning them a common GroupName will prompt Entity Resolution to concatenate them into a single value.
A key that allows grouping of multiple input attributes into a unified matching group. For example, let's consider a scenario where the source table contains various addresses, such as business_address and shipping_address. By assigning the MatchKey Address' to both attributes, Entity Resolution will match records across these fields to create a consolidated matching group. If no MatchKey is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.
The type of the attribute, selected from a list of values.
" + } + }, + "documentation":"An object containing FieldField, Type, GroupName, and MatchKey.
The timestamp of when the SchemaMapping was created.
The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.
The name of the schema.
" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"The timestamp of when the SchemaMapping was last updated.
An object containing SchemaName, SchemaArn, CreatedAt, andUpdatedAt.
The name of the matching job to be retrieved.
", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "StartMatchingJobOutput":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"The ID of the job.
" + } + } + }, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"VeniceGlobalArn", + "documentation":"The ARN of the resource for which you want to view tags.
", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"The tags used to organize, track, or control access for this resource.
" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"The request was denied due to request throttling. HTTP Status Code: 429
The ARN of the resource for which you want to untag.
", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"The list of tag keys to remove from the resource.
", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateMatchingWorkflowInput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowName" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"A description of the workflow.
" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"An object which defines an incremental run type and has only incrementalRunType as a field.
A list of InputSource objects, which have the fields InputSourceARN and SchemaName.
A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.
An object which defines the resolutionType and the ruleBasedProperties
The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.
" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"The name of the workflow to be retrieved.
", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "UpdateMatchingWorkflowOutput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowName" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"A description of the workflow.
" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"An object which defines an incremental run type and has only incrementalRunType as a field.
A list of InputSource objects, which have the fields InputSourceARN and SchemaName.
A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.
An object which defines the resolutionType and the ruleBasedProperties
The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.
" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"The name of the workflow.
" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"The input fails to satisfy the constraints specified by AWS Entity Resolution. HTTP Status Code: 400
Welcome to the AWS Entity Resolution API Reference.
AWS Entity Resolution is an AWS service that provides pre-configured entity resolution capabilities that enable developers and analysts at advertising and marketing companies to build an accurate and complete view of their consumers.
With AWS Entity Resolution, you have the ability to match source records containing consumer identifiers, such as name, email address, and phone number. This holds true even when these records have incomplete or conflicting identifiers. For example, AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system, which includes account information like first name, last name, postal address, phone number, and email address, with a source record from a marketing system containing campaign information, such as username and email address.
To learn more about AWS Entity Resolution concepts, procedures, and best practices, see the AWS Entity Resolution User Guide.
" +} From 7cf0743fe1565fe29481b236aec758f4d178cfd0 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:24:58 +0000 Subject: [PATCH 043/270] AWS Glue Update: Release Glue Studio Snowflake Connector Node for SDK/CLI --- .../next-release/feature-AWSGlue-293a6f3.json | 6 + .../codegen-resources/service-2.json | 144 +++++++++++++++++- 2 files changed, 149 insertions(+), 1 deletion(-) create mode 100644 .changes/next-release/feature-AWSGlue-293a6f3.json diff --git a/.changes/next-release/feature-AWSGlue-293a6f3.json b/.changes/next-release/feature-AWSGlue-293a6f3.json new file mode 100644 index 000000000000..9e58041ecd56 --- /dev/null +++ b/.changes/next-release/feature-AWSGlue-293a6f3.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Release Glue Studio Snowflake Connector Node for SDK/CLI" +} diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 7ccd1d6189e0..29530617b177 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -3668,7 +3668,7 @@ }, "Data":{ "shape":"AmazonRedshiftNodeData", - "documentation":"Specifies the data of the Amazon Reshift target node.
" + "documentation":"Specifies the data of the Amazon Redshift target node.
" }, "Inputs":{ "shape":"OneInput", @@ -5381,6 +5381,14 @@ "Recipe":{ "shape":"Recipe", "documentation":"Specifies a Glue DataBrew recipe node.
" + }, + "SnowflakeSource":{ + "shape":"SnowflakeSource", + "documentation":"Specifies a Snowflake data source.
" + }, + "SnowflakeTarget":{ + "shape":"SnowflakeTarget", + "documentation":"Specifies a target that writes to a Snowflake data source.
" } }, "documentation":" CodeGenConfigurationNode enumerates all valid Node types. One and only one of its member variables can be populated.
Specifies skewed values in a table. Skewed values are those that occur with very high frequency.
" }, + "SnowflakeNodeData":{ + "type":"structure", + "members":{ + "SourceType":{ + "shape":"GenericLimitedString", + "documentation":"Specifies how retrieved data is specified. Valid values: \"table\", \"query\".
Specifies a Glue Data Catalog Connection to a Snowflake endpoint.
" + }, + "Schema":{ + "shape":"GenericString", + "documentation":"Specifies a Snowflake database schema for your node to use.
" + }, + "Table":{ + "shape":"GenericString", + "documentation":"Specifies a Snowflake table for your node to use.
" + }, + "Database":{ + "shape":"GenericString", + "documentation":"Specifies a Snowflake database for your node to use.
" + }, + "TempDir":{ + "shape":"EnclosedInStringProperty", + "documentation":"Not currently used.
" + }, + "IamRole":{ + "shape":"Option", + "documentation":"Not currently used.
" + }, + "AdditionalOptions":{ + "shape":"AdditionalOptions", + "documentation":"Specifies additional options passed to the Snowflake connector. If options are specified elsewhere in this node, this will take precedence.
" + }, + "SampleQuery":{ + "shape":"GenericString", + "documentation":"A SQL string used to retrieve data with the query sourcetype.
A SQL string run before the Snowflake connector performs its standard actions.
" + }, + "PostAction":{ + "shape":"GenericString", + "documentation":"A SQL string run after the Snowflake connector performs its standard actions.
" + }, + "Action":{ + "shape":"GenericString", + "documentation":"Specifies what action to take when writing to a table with preexisting data. Valid values: append, merge, truncate, drop.
Used when Action is append. Specifies the resolution behavior when a row already exists. If true, preexisting rows will be updated. If false, those rows will be inserted.
Specifies a merge action. Valid values: simple, custom. If simple, merge behavior is defined by MergeWhenMatched and MergeWhenNotMatched. If custom, defined by MergeClause.
Specifies how to resolve records that match preexisting data when merging. Valid values: update, delete.
Specifies how to process records that do not match preexisting data when merging. Valid values: insert, none.
A SQL statement that specifies a custom merge behavior.
" + }, + "StagingTable":{ + "shape":"GenericString", + "documentation":"The name of a staging table used when performing merge or upsert append actions. Data is written to this table, then moved to table by a generated postaction.
Specifies the columns combined to identify a record when detecting matches for merges and upserts. A list of structures with value, label and description keys. Each structure describes a column.
Specifies whether automatic query pushdown is enabled. If pushdown is enabled, then when a query is run on Spark, if part of the query can be \"pushed down\" to the Snowflake server, it is pushed down. This improves performance of some queries.
" + }, + "TableSchema":{ + "shape":"OptionList", + "documentation":"Manually defines the target schema for the node. A list of structures with value , label and description keys. Each structure defines a column.
Specifies configuration for Snowflake nodes in Glue Studio.
" + }, + "SnowflakeSource":{ + "type":"structure", + "required":[ + "Name", + "Data" + ], + "members":{ + "Name":{ + "shape":"NodeName", + "documentation":"The name of the Snowflake data source.
" + }, + "Data":{ + "shape":"SnowflakeNodeData", + "documentation":"Configuration for the Snowflake data source.
" + }, + "OutputSchemas":{ + "shape":"GlueSchemas", + "documentation":"Specifies user-defined schemas for your output data.
" + } + }, + "documentation":"Specifies a Snowflake data source.
" + }, + "SnowflakeTarget":{ + "type":"structure", + "required":[ + "Name", + "Data" + ], + "members":{ + "Name":{ + "shape":"NodeName", + "documentation":"The name of the Snowflake target.
" + }, + "Data":{ + "shape":"SnowflakeNodeData", + "documentation":"Specifies the data of the Snowflake target node.
" + }, + "Inputs":{ + "shape":"OneInput", + "documentation":"The nodes that are inputs to the data target.
" + } + }, + "documentation":"Specifies a Snowflake target.
" + }, "Sort":{ "type":"string", "enum":[ From b5fca390ac8fdc0dc82203825d3d4f13f590c4c7 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:26:16 +0000 Subject: [PATCH 044/270] Updated endpoints.json and partitions.json. --- .../feature-AWSSDKforJavav2-0443982.json | 6 ++ .../regions/internal/region/endpoints.json | 79 ++++++++++++------- 2 files changed, 56 insertions(+), 29 deletions(-) create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json new file mode 100644 index 000000000000..e5b5ee3ca5e3 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." +} diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 330334797585..186f785124e8 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -14430,67 +14430,67 @@ "endpoints" : { "af-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.af-south-1.amazonaws.com", + "hostname" : "servicediscovery.af-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-east-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-east-1.amazonaws.com", + "hostname" : "servicediscovery.ap-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-northeast-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-northeast-1.amazonaws.com", + "hostname" : "servicediscovery.ap-northeast-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-northeast-2" : { "variants" : [ { - "hostname" : 
"servicediscovery.ap-northeast-2.amazonaws.com", + "hostname" : "servicediscovery.ap-northeast-2.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-northeast-3" : { "variants" : [ { - "hostname" : "servicediscovery.ap-northeast-3.amazonaws.com", + "hostname" : "servicediscovery.ap-northeast-3.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-south-1.amazonaws.com", + "hostname" : "servicediscovery.ap-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-south-2" : { "variants" : [ { - "hostname" : "servicediscovery.ap-south-2.amazonaws.com", + "hostname" : "servicediscovery.ap-south-2.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-1.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-2" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-2.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-2.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-3" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-3.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-3.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-4" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-4.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-4.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14499,7 +14499,10 @@ "hostname" : "servicediscovery-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.ca-central-1.amazonaws.com", + "hostname" : "servicediscovery-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14512,67 +14515,67 @@ }, "eu-central-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-central-1.amazonaws.com", + "hostname" : 
"servicediscovery.eu-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-central-2" : { "variants" : [ { - "hostname" : "servicediscovery.eu-central-2.amazonaws.com", + "hostname" : "servicediscovery.eu-central-2.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-north-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-north-1.amazonaws.com", + "hostname" : "servicediscovery.eu-north-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-south-1.amazonaws.com", + "hostname" : "servicediscovery.eu-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-south-2" : { "variants" : [ { - "hostname" : "servicediscovery.eu-south-2.amazonaws.com", + "hostname" : "servicediscovery.eu-south-2.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-west-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-west-1.amazonaws.com", + "hostname" : "servicediscovery.eu-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-west-2" : { "variants" : [ { - "hostname" : "servicediscovery.eu-west-2.amazonaws.com", + "hostname" : "servicediscovery.eu-west-2.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-west-3" : { "variants" : [ { - "hostname" : "servicediscovery.eu-west-3.amazonaws.com", + "hostname" : "servicediscovery.eu-west-3.api.aws", "tags" : [ "dualstack" ] } ] }, "me-central-1" : { "variants" : [ { - "hostname" : "servicediscovery.me-central-1.amazonaws.com", + "hostname" : "servicediscovery.me-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "me-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.me-south-1.amazonaws.com", + "hostname" : "servicediscovery.me-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "sa-east-1" : { "variants" : [ { - "hostname" : "servicediscovery.sa-east-1.amazonaws.com", + "hostname" : "servicediscovery.sa-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14581,7 +14584,10 @@ "hostname" : "servicediscovery-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : 
"servicediscovery.us-east-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14597,7 +14603,10 @@ "hostname" : "servicediscovery-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-east-2.amazonaws.com", + "hostname" : "servicediscovery-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-east-2.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14613,7 +14622,10 @@ "hostname" : "servicediscovery-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-west-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14629,7 +14641,10 @@ "hostname" : "servicediscovery-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-west-2.amazonaws.com", + "hostname" : "servicediscovery-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-west-2.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -18996,13 +19011,13 @@ "endpoints" : { "cn-north-1" : { "variants" : [ { - "hostname" : "servicediscovery.cn-north-1.amazonaws.com.cn", + "hostname" : "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", "tags" : [ "dualstack" ] } ] }, "cn-northwest-1" : { "variants" : [ { - "hostname" : "servicediscovery.cn-northwest-1.amazonaws.com.cn", + "hostname" : "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", "tags" : [ "dualstack" ] } ] } @@ -22879,6 +22894,9 @@ }, "us-gov-east-1" : { "variants" : [ { + "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", 
"tags" : [ "fips" ] }, { @@ -22895,6 +22913,9 @@ }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] }, { From e076270090d49d918860595af86ff0181319e6d3 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 26 Jul 2023 18:27:22 +0000 Subject: [PATCH 045/270] Release 2.20.112. Updated CHANGELOG.md, README.md and all pom.xml. --- .changes/2.20.112.json | 72 +++++++++++++++++++ .../feature-AWSCloudControlAPI-6b1f7c2.json | 6 -- ...ture-AWSElementalMediaConvert-7f90871.json | 6 -- .../feature-AWSEntityResolution-f6e7aec.json | 6 -- .../next-release/feature-AWSGlue-293a6f3.json | 6 -- .../feature-AWSSDKforJavav2-0443982.json | 6 -- .../feature-AmazonHealthLake-3586f3c.json | 6 -- ...-AmazonManagedBlockchainQuery-b049e7a.json | 6 -- .../feature-AmazonOmics-77634b7.json | 6 -- .../feature-AmazonPolly-c852dc1.json | 6 -- .../feature-AmazonRoute53-3f67164.json | 6 -- ...e-OpenSearchServiceServerless-3bc0d2a.json | 6 -- CHANGELOG.md | 47 +++++++++++- README.md | 8 +-- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 12 +++- bom-internal/pom.xml | 2 +- bom/pom.xml | 12 +++- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- 
core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- 
services/batch/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- 
services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 +- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml 
| 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- 
services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- 
services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 4 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- 
services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- 
test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- 430 files changed, 560 insertions(+), 487 deletions(-) create mode 100644 .changes/2.20.112.json delete mode 100644 .changes/next-release/feature-AWSCloudControlAPI-6b1f7c2.json delete mode 100644 .changes/next-release/feature-AWSElementalMediaConvert-7f90871.json delete mode 100644 .changes/next-release/feature-AWSEntityResolution-f6e7aec.json delete mode 100644 .changes/next-release/feature-AWSGlue-293a6f3.json delete mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json delete mode 100644 .changes/next-release/feature-AmazonHealthLake-3586f3c.json delete mode 100644 .changes/next-release/feature-AmazonManagedBlockchainQuery-b049e7a.json delete mode 100644 .changes/next-release/feature-AmazonOmics-77634b7.json delete mode 100644 .changes/next-release/feature-AmazonPolly-c852dc1.json delete mode 100644 .changes/next-release/feature-AmazonRoute53-3f67164.json delete mode 100644 .changes/next-release/feature-OpenSearchServiceServerless-3bc0d2a.json diff --git a/.changes/2.20.112.json b/.changes/2.20.112.json new file mode 100644 index 000000000000..b324d8bea52e --- /dev/null +++ b/.changes/2.20.112.json @@ -0,0 +1,72 @@ +{ + "version": "2.20.112", + "date": "2023-07-26", + "entries": [ + { + "type": "feature", + "category": "AWS Cloud Control API", + "contributor": "", + "description": "Updates the documentation for CreateResource." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes general updates to user documentation." 
+ }, + { + "type": "feature", + "category": "AWS EntityResolution", + "contributor": "", + "description": "AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Release Glue Studio Snowflake Connector Node for SDK/CLI" + }, + { + "type": "feature", + "category": "Amazon HealthLake", + "contributor": "", + "description": "Updating the HealthLake service documentation." + }, + { + "type": "feature", + "category": "Amazon Managed Blockchain Query", + "contributor": "", + "description": "Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs." + }, + { + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "The service is renaming as a part of AWS Health." + }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds 1 new voice - Lisa (nl-BE)" + }, + { + "type": "feature", + "category": "Amazon Route 53", + "contributor": "", + "description": "Update that corrects the documents for received feedback." + }, + { + "type": "feature", + "category": "OpenSearch Service Serverless", + "contributor": "", + "description": "This release adds new collection type VectorSearch." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AWSCloudControlAPI-6b1f7c2.json b/.changes/next-release/feature-AWSCloudControlAPI-6b1f7c2.json deleted file mode 100644 index 6199423b4571..000000000000 --- a/.changes/next-release/feature-AWSCloudControlAPI-6b1f7c2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Cloud Control API", - "contributor": "", - "description": "Updates the documentation for CreateResource." -} diff --git a/.changes/next-release/feature-AWSElementalMediaConvert-7f90871.json b/.changes/next-release/feature-AWSElementalMediaConvert-7f90871.json deleted file mode 100644 index c9ad3ec656f9..000000000000 --- a/.changes/next-release/feature-AWSElementalMediaConvert-7f90871.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Elemental MediaConvert", - "contributor": "", - "description": "This release includes general updates to user documentation." -} diff --git a/.changes/next-release/feature-AWSEntityResolution-f6e7aec.json b/.changes/next-release/feature-AWSEntityResolution-f6e7aec.json deleted file mode 100644 index a391190e125e..000000000000 --- a/.changes/next-release/feature-AWSEntityResolution-f6e7aec.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS EntityResolution", - "contributor": "", - "description": "AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information." 
-} diff --git a/.changes/next-release/feature-AWSGlue-293a6f3.json b/.changes/next-release/feature-AWSGlue-293a6f3.json deleted file mode 100644 index 9e58041ecd56..000000000000 --- a/.changes/next-release/feature-AWSGlue-293a6f3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Glue", - "contributor": "", - "description": "Release Glue Studio Snowflake Connector Node for SDK/CLI" -} diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json deleted file mode 100644 index e5b5ee3ca5e3..000000000000 --- a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS SDK for Java v2", - "contributor": "", - "description": "Updated endpoint and partition metadata." -} diff --git a/.changes/next-release/feature-AmazonHealthLake-3586f3c.json b/.changes/next-release/feature-AmazonHealthLake-3586f3c.json deleted file mode 100644 index ef00eda988b4..000000000000 --- a/.changes/next-release/feature-AmazonHealthLake-3586f3c.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon HealthLake", - "contributor": "", - "description": "Updating the HealthLake service documentation." -} diff --git a/.changes/next-release/feature-AmazonManagedBlockchainQuery-b049e7a.json b/.changes/next-release/feature-AmazonManagedBlockchainQuery-b049e7a.json deleted file mode 100644 index 6dc090e0ef34..000000000000 --- a/.changes/next-release/feature-AmazonManagedBlockchainQuery-b049e7a.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Managed Blockchain Query", - "contributor": "", - "description": "Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs." 
-} diff --git a/.changes/next-release/feature-AmazonOmics-77634b7.json b/.changes/next-release/feature-AmazonOmics-77634b7.json deleted file mode 100644 index 6bc9604671b3..000000000000 --- a/.changes/next-release/feature-AmazonOmics-77634b7.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Omics", - "contributor": "", - "description": "The service is renaming as a part of AWS Health." -} diff --git a/.changes/next-release/feature-AmazonPolly-c852dc1.json b/.changes/next-release/feature-AmazonPolly-c852dc1.json deleted file mode 100644 index ee3b323d9b65..000000000000 --- a/.changes/next-release/feature-AmazonPolly-c852dc1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Polly", - "contributor": "", - "description": "Amazon Polly adds 1 new voice - Lisa (nl-BE)" -} diff --git a/.changes/next-release/feature-AmazonRoute53-3f67164.json b/.changes/next-release/feature-AmazonRoute53-3f67164.json deleted file mode 100644 index ef591937a1b4..000000000000 --- a/.changes/next-release/feature-AmazonRoute53-3f67164.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Route 53", - "contributor": "", - "description": "Update that corrects the documents for received feedback." -} diff --git a/.changes/next-release/feature-OpenSearchServiceServerless-3bc0d2a.json b/.changes/next-release/feature-OpenSearchServiceServerless-3bc0d2a.json deleted file mode 100644 index 995a4f29244c..000000000000 --- a/.changes/next-release/feature-OpenSearchServiceServerless-3bc0d2a.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "OpenSearch Service Serverless", - "contributor": "", - "description": "This release adds new collection type VectorSearch." 
-} diff --git a/CHANGELOG.md b/CHANGELOG.md index ec8d505f0dc8..697b636fc835 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,48 @@ +# __2.20.112__ __2023-07-26__ +## __AWS Cloud Control API__ + - ### Features + - Updates the documentation for CreateResource. + +## __AWS Elemental MediaConvert__ + - ### Features + - This release includes general updates to user documentation. + +## __AWS EntityResolution__ + - ### Features + - AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information. + +## __AWS Glue__ + - ### Features + - Release Glue Studio Snowflake Connector Node for SDK/CLI + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon HealthLake__ + - ### Features + - Updating the HealthLake service documentation. + +## __Amazon Managed Blockchain Query__ + - ### Features + - Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs. + +## __Amazon Omics__ + - ### Features + - The service is renaming as a part of AWS Health. + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds 1 new voice - Lisa (nl-BE) + +## __Amazon Route 53__ + - ### Features + - Update that corrects the documents for received feedback. + +## __OpenSearch Service Serverless__ + - ### Features + - This release adds new collection type VectorSearch. 
+ # __2.20.111__ __2023-07-25__ ## __AWS DataSync__ - ### Features @@ -791,7 +836,7 @@ Special thanks to the following contributors to this release: ## __Contributors__ Special thanks to the following contributors to this release: -[@breader124](https://github.com/breader124), [@bmaizels](https://github.com/bmaizels) +[@bmaizels](https://github.com/bmaizels), [@breader124](https://github.com/breader124) # __2.20.85__ __2023-06-13__ ## __AWS CloudTrail__ - ### Features diff --git a/README.md b/README.md index cdb7f0f77fda..395ce77cdf67 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same verInformation about the evaluation status of the rules for the training job.
" }, + "ProfilerConfig":{"shape":"ProfilerConfig"}, "Environment":{ "shape":"TrainingEnvironmentMap", "documentation":"The environment variables to set in the Docker container.
" From 07aa886a1063d853beda87f0feacaa3deebb302a Mon Sep 17 00:00:00 2001 From: AWS <> Date: Thu, 27 Jul 2023 18:09:31 +0000 Subject: [PATCH 048/270] Amazon Elastic Block Store Update: SDK and documentation updates for Amazon Elastic Block Store API --- ...ature-AmazonElasticBlockStore-fedaae5.json | 6 + .../codegen-resources/endpoint-rule-set.json | 399 ++++++----- .../codegen-resources/endpoint-tests.json | 630 +++++++++++++++++- .../codegen-resources/service-2.json | 37 +- 4 files changed, 876 insertions(+), 196 deletions(-) create mode 100644 .changes/next-release/feature-AmazonElasticBlockStore-fedaae5.json diff --git a/.changes/next-release/feature-AmazonElasticBlockStore-fedaae5.json b/.changes/next-release/feature-AmazonElasticBlockStore-fedaae5.json new file mode 100644 index 000000000000..0a27ae293c21 --- /dev/null +++ b/.changes/next-release/feature-AmazonElasticBlockStore-fedaae5.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Elastic Block Store", + "contributor": "", + "description": "SDK and documentation updates for Amazon Elastic Block Store API" +} diff --git a/services/ebs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/ebs/src/main/resources/codegen-resources/endpoint-rule-set.json index 66091be73dc1..5a6ff8bebb8f 100644 --- a/services/ebs/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/ebs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" 
+ }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + 
] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ebs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ebs-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ebs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + 
true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ebs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://ebs-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ebs.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ebs.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://ebs.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": 
"error" } ] } diff --git a/services/ebs/src/main/resources/codegen-resources/endpoint-tests.json b/services/ebs/src/main/resources/codegen-resources/endpoint-tests.json index 91c92af29cd0..3d3883d4240b 100644 --- a/services/ebs/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/ebs/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,16 +1,632 @@ { "testCases": [ { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.af-south-1.amazonaws.com" + } + }, + "params": { + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-east-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-northeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-northeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://ebs.ap-south-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-southeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-southeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-southeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-central-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-north-1.amazonaws.com" + } + }, + 
"params": { + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-south-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-west-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-west-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-west-3.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.me-south-1.amazonaws.com" + } + }, + "params": { + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.sa-east-1.amazonaws.com" + } + }, + "params": { + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For 
region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": 
"https://ebs-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-gov-east-1.amazonaws.com" + } + }, + "params": { + 
"Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": 
true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": 
false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -20,9 +636,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -32,11 +648,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/ebs/src/main/resources/codegen-resources/service-2.json b/services/ebs/src/main/resources/codegen-resources/service-2.json index bea4106a85fc..839422e2c6cb 100644 --- a/services/ebs/src/main/resources/codegen-resources/service-2.json +++ b/services/ebs/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"Seals and completes the snapshot after all of the required blocks of data have been written to it. Completing the snapshot changes the status to completed. You cannot write new blocks to a snapshot after it has been completed.
Seals and completes the snapshot after all of the required blocks of data have been written to it. Completing the snapshot changes the status to completed. You cannot write new blocks to a snapshot after it has been completed.
You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.
Returns the data in a block in an Amazon Elastic Block Store snapshot.
" + "documentation":"Returns the data in a block in an Amazon Elastic Block Store snapshot.
You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.
Returns information about the blocks that are different between two Amazon Elastic Block Store snapshots of the same volume/snapshot lineage.
" + "documentation":"Returns information about the blocks that are different between two Amazon Elastic Block Store snapshots of the same volume/snapshot lineage.
You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.
Returns information about the blocks in an Amazon Elastic Block Store snapshot.
" + "documentation":"Returns information about the blocks in an Amazon Elastic Block Store snapshot.
You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.
Writes a block of data to a snapshot. If the specified block contains data, the existing data is overwritten. The target snapshot must be in the pending state.
Data written to a snapshot must be aligned with 512-KiB sectors.
", + "documentation":"Writes a block of data to a snapshot. If the specified block contains data, the existing data is overwritten. The target snapshot must be in the pending state.
Data written to a snapshot must be aligned with 512-KiB sectors.
You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.
Creates a new Amazon EBS snapshot. The new snapshot enters the pending state after the request completes.
After creating the snapshot, use PutSnapshotBlock to write blocks of data to the snapshot.
" + "documentation":"Creates a new Amazon EBS snapshot. The new snapshot enters the pending state after the request completes.
After creating the snapshot, use PutSnapshotBlock to write blocks of data to the snapshot.
You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.
An internal error has occurred.
", + "documentation":"An internal error has occurred. For more information see Error retries.
", "error":{"httpStatusCode":500}, "exception":true, "fault":true @@ -599,7 +599,7 @@ "documentation":"The reason for the exception.
" } }, - "documentation":"The number of API requests has exceed the maximum allowed API request throttling limit.
", + "documentation":"The number of API requests has exceeded the maximum allowed API request throttling limit for the snapshot. For more information see Error retries.
", "error":{"httpStatusCode":400}, "exception":true }, @@ -628,7 +628,17 @@ "type":"string", "enum":[ "SNAPSHOT_NOT_FOUND", - "DEPENDENCY_RESOURCE_NOT_FOUND" + "GRANT_NOT_FOUND", + "DEPENDENCY_RESOURCE_NOT_FOUND", + "IMAGE_NOT_FOUND" + ] + }, + "SSEType":{ + "type":"string", + "enum":[ + "sse-ebs", + "sse-kms", + "none" ] }, "ServiceQuotaExceededException":{ @@ -735,6 +745,10 @@ "KmsKeyArn":{ "shape":"KmsKeyArn", "documentation":"The Amazon Resource Name (ARN) of the Key Management Service (KMS) key used to encrypt the snapshot.
" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"Reserved for future use.
" } } }, @@ -800,6 +814,7 @@ "INVALID_CUSTOMER_KEY", "INVALID_PAGE_TOKEN", "INVALID_BLOCK_TOKEN", + "INVALID_GRANT_TOKEN", "INVALID_SNAPSHOT_ID", "UNRELATED_SNAPSHOTS", "INVALID_BLOCK", @@ -808,7 +823,9 @@ "INVALID_DEPENDENCY_REQUEST", "INVALID_PARAMETER_VALUE", "INVALID_VOLUME_SIZE", - "CONFLICTING_BLOCK_UPDATE" + "CONFLICTING_BLOCK_UPDATE", + "INVALID_IMAGE_ID", + "WRITE_REQUEST_TIMEOUT" ] }, "VolumeSize":{ From e8c51a0162f846d8438b83b0dbd82e3e04866840 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Thu, 27 Jul 2023 18:09:35 +0000 Subject: [PATCH 049/270] Auto Scaling Update: This release updates validation for instance types used in the AllowedInstanceTypes and ExcludedInstanceTypes parameters of the InstanceRequirements property of a MixedInstancesPolicy. --- .../next-release/feature-AutoScaling-b2e97c5.json | 6 ++++++ .../resources/codegen-resources/paginators-1.json | 6 ++++++ .../main/resources/codegen-resources/service-2.json | 13 +++++++------ 3 files changed, 19 insertions(+), 6 deletions(-) create mode 100644 .changes/next-release/feature-AutoScaling-b2e97c5.json diff --git a/.changes/next-release/feature-AutoScaling-b2e97c5.json b/.changes/next-release/feature-AutoScaling-b2e97c5.json new file mode 100644 index 000000000000..bf3d652d8354 --- /dev/null +++ b/.changes/next-release/feature-AutoScaling-b2e97c5.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "This release updates validation for instance types used in the AllowedInstanceTypes and ExcludedInstanceTypes parameters of the InstanceRequirements property of a MixedInstancesPolicy." 
+} diff --git a/services/autoscaling/src/main/resources/codegen-resources/paginators-1.json b/services/autoscaling/src/main/resources/codegen-resources/paginators-1.json index 550ec09c7252..67362bd5118b 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/paginators-1.json +++ b/services/autoscaling/src/main/resources/codegen-resources/paginators-1.json @@ -52,6 +52,12 @@ "input_token": "NextToken", "limit_key": "MaxRecords", "output_token": "NextToken" + }, + "DescribeWarmPool": { + "input_token": "NextToken", + "limit_key": "MaxRecords", + "output_token": "NextToken", + "result_key": "Instances" } } } \ No newline at end of file diff --git a/services/autoscaling/src/main/resources/codegen-resources/service-2.json b/services/autoscaling/src/main/resources/codegen-resources/service-2.json index 0d626df5856e..9ed7158a547c 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/autoscaling/src/main/resources/codegen-resources/service-2.json @@ -141,7 +141,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"Completes the lifecycle action for the specified token or instance with the specified result.
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
(Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.
(Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when an instance is put into a wait state due to a lifecycle hook.
(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state.
If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.
For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
" + "documentation":"Completes the lifecycle action for the specified token or instance with the specified result.
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
(Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.
(Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when an instance is put into a wait state due to a lifecycle hook.
(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state.
If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.
For more information, see Complete a lifecycle action in the Amazon EC2 Auto Scaling User Guide.
" }, "CreateAutoScalingGroup":{ "name":"CreateAutoScalingGroup", @@ -705,7 +705,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"Detaches one or more traffic sources from the specified Auto Scaling group.
When you detach a taffic, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the traffic source using the DescribeTrafficSources API call. The instances continue to run.
Detaches one or more traffic sources from the specified Auto Scaling group.
When you detach a traffic source, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the traffic source using the DescribeTrafficSources API call. The instances continue to run.
The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity. For exact capacity, you must specify a positive value.
Required if the policy type is SimpleScaling. (Not used with any other policy type.)
The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity. For exact capacity, you must specify a non-negative value.
Required if the policy type is SimpleScaling. (Not used with any other policy type.)
The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.
The amount by which to scale. The adjustment is based on the value that you specified in the AdjustmentType property (either an absolute number or a percentage). A positive value adds to the current capacity and a negative number subtracts from the current capacity.
The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity. For exact capacity, you must specify a non-negative value.
" } }, "documentation":"Describes information used to create a step adjustment for a step scaling policy.
For the following examples, suppose that you have an alarm with a breach threshold of 50:
To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.
To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.
There are a few rules for the step adjustments for your step policy:
The ranges of your step adjustments can't overlap or have a gap.
At most, one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.
At most, one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.
The upper and lower bound can't be null in the same step adjustment.
For more information, see Step adjustments in the Amazon EC2 Auto Scaling User Guide.
" From add14cc1ebfd185fe524f83dfc8072f351308b03 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Thu, 27 Jul 2023 18:09:36 +0000 Subject: [PATCH 050/270] Amazon Elastic Compute Cloud Update: SDK and documentation updates for Amazon Elastic Block Store APIs --- ...ure-AmazonElasticComputeCloud-dd4dfa9.json | 6 ++++ .../codegen-resources/service-2.json | 33 +++++++++++++++++++ .../codegen-resources/waiters-2.json | 25 ++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 .changes/next-release/feature-AmazonElasticComputeCloud-dd4dfa9.json diff --git a/.changes/next-release/feature-AmazonElasticComputeCloud-dd4dfa9.json b/.changes/next-release/feature-AmazonElasticComputeCloud-dd4dfa9.json new file mode 100644 index 000000000000..67409026da7f --- /dev/null +++ b/.changes/next-release/feature-AmazonElasticComputeCloud-dd4dfa9.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "SDK and documentation updates for Amazon Elastic Block Store APIs" +} diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 9793e60d3dac..ab1747b2f4fe 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -29400,6 +29400,11 @@ "shape":"Boolean", "documentation":"Indicates whether encryption by default is enabled.
", "locationName":"ebsEncryptionByDefault" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"Reserved for future use.
", + "locationName":"sseType" } } }, @@ -48490,6 +48495,11 @@ "shape":"Integer", "documentation":"The size of the volume, in GiB.
", "locationName":"volumeSize" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"Reserved for future use.
", + "locationName":"sseType" } } }, @@ -49343,6 +49353,14 @@ }, "documentation":"Describes the storage parameters for Amazon S3 and Amazon S3 buckets for an instance store-backed AMI.
" }, + "SSEType":{ + "type":"string", + "enum":[ + "sse-ebs", + "sse-kms", + "none" + ] + }, "ScheduledInstance":{ "type":"structure", "members":{ @@ -50643,6 +50661,11 @@ "shape":"MillisecondDateTime", "documentation":"Only for archived snapshots that are temporarily restored. Indicates the date and time when a temporarily restored snapshot will be automatically re-archived.
", "locationName":"restoreExpiryTime" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"Reserved for future use.
", + "locationName":"sseType" } }, "documentation":"Describes a snapshot.
" @@ -50804,6 +50827,11 @@ "shape":"String", "documentation":"The ARN of the Outpost on which the snapshot is stored. For more information, see Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.
", "locationName":"outpostArn" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"Reserved for future use.
", + "locationName":"sseType" } }, "documentation":"Information about a snapshot.
" @@ -56665,6 +56693,11 @@ "shape":"Integer", "documentation":"The throughput that the volume supports, in MiB/s.
", "locationName":"throughput" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"Reserved for future use.
", + "locationName":"sseType" } }, "documentation":"Describes a volume.
" diff --git a/services/ec2/src/main/resources/codegen-resources/waiters-2.json b/services/ec2/src/main/resources/codegen-resources/waiters-2.json index 4f73e720fcc7..e890388e73b7 100644 --- a/services/ec2/src/main/resources/codegen-resources/waiters-2.json +++ b/services/ec2/src/main/resources/codegen-resources/waiters-2.json @@ -506,6 +506,31 @@ } ] }, + "StoreImageTaskComplete": { + "delay": 5, + "operation": "DescribeStoreImageTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "Completed", + "matcher": "pathAll", + "state": "success", + "argument": "StoreImageTaskResults[].StoreTaskState" + }, + { + "expected": "Failed", + "matcher": "pathAny", + "state": "failure", + "argument": "StoreImageTaskResults[].StoreTaskState" + }, + { + "expected": "InProgress", + "matcher": "pathAny", + "state": "retry", + "argument": "StoreImageTaskResults[].StoreTaskState" + } + ] + }, "SubnetAvailable": { "delay": 15, "operation": "DescribeSubnets", From 4c31116c58880b3dc80bf17888f99342f4155d49 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Thu, 27 Jul 2023 18:09:36 +0000 Subject: [PATCH 051/270] Amazon Elastic Kubernetes Service Update: Add multiple customer error code to handle customer caused failure when managing EKS node groups --- ...mazonElasticKubernetesService-64381c0.json | 6 + .../codegen-resources/endpoint-rule-set.json | 442 ++--- .../codegen-resources/endpoint-tests.json | 1452 +++-------------- .../codegen-resources/service-2.json | 16 +- 4 files changed, 447 insertions(+), 1469 deletions(-) create mode 100644 .changes/next-release/feature-AmazonElasticKubernetesService-64381c0.json diff --git a/.changes/next-release/feature-AmazonElasticKubernetesService-64381c0.json b/.changes/next-release/feature-AmazonElasticKubernetesService-64381c0.json new file mode 100644 index 000000000000..75e2f10174e8 --- /dev/null +++ b/.changes/next-release/feature-AmazonElasticKubernetesService-64381c0.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon 
Elastic Kubernetes Service", + "contributor": "", + "description": "Add multiple customer error code to handle customer caused failure when managing EKS node groups" +} diff --git a/services/eks/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/eks/src/main/resources/codegen-resources/endpoint-rule-set.json index 4b665084edcf..4d1971250c54 100644 --- a/services/eks/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/eks/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + 
"properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,224 +111,288 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "Region" } - ] - }, + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://eks-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", "argv": [ - "aws", + true, { "fn": "getAttr", "argv": [ { 
"ref": "PartitionResult" }, - "name" + "supportsDualStack" ] } ] } ], - "endpoint": { - "url": "https://fips.eks.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://eks-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://eks.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://fips.eks.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://eks.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://eks-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } 
+ ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://eks-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://eks.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://eks.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://eks.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - 
"endpoint": { - "url": "https://eks.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/eks/src/main/resources/codegen-resources/endpoint-tests.json b/services/eks/src/main/resources/codegen-resources/endpoint-tests.json index 8cc05949df50..0355c7dcc074 100644 --- a/services/eks/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/eks/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,980 +1,31 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and 
DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-south-2.api.aws" - } - }, - "params": { - 
"UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - 
"documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - 
"expect": { - "endpoint": { - "url": "https://eks.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-central-2.api.aws" - } - }, 
- "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region 
us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-north-1.amazonaws.com" - 
} - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 
with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-northeast-3.api.aws" - } - }, - 
"params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-northeast-3.amazonaws.com" + "url": "https://eks.af-south-1.amazonaws.com" } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack 
disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-northeast-2.amazonaws.com" + "url": "https://eks.ap-east-1.amazonaws.com" } }, "params": { + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", "UseDualStack": false } }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, { "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -983,680 +34,537 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled 
and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.me-south-1.amazonaws.com" - } - }, - "params": { "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.sa-east-1.amazonaws.com" + "url": "https://eks.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled 
and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-east-1.api.aws" + "url": "https://eks.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-east-1.amazonaws.com" + "url": "https://eks.ap-south-1.amazonaws.com" } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.cn-north-1.amazonaws.com.cn" + "url": "https://eks.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "cn-north-1", + "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://eks.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://eks.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.cn-north-1.amazonaws.com.cn" + "url": "https://eks.ap-southeast-3.amazonaws.com" } }, "params": { + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "cn-north-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-gov-west-1.amazonaws.com" + "url": "https://eks.ca-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", + "Region": "ca-central-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-gov-west-1.api.aws" + "url": "https://eks.eu-central-1.amazonaws.com" } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-gov-west-1.amazonaws.com" + "url": "https://eks.eu-north-1.amazonaws.com" } }, "params": { + "Region": "eu-north-1", "UseFIPS": false, - "Region": "us-gov-west-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.ap-southeast-1.api.aws" + "url": "https://eks.eu-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.ap-southeast-1.amazonaws.com" + "url": "https://eks.eu-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", + "Region": "eu-west-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-1.api.aws" + "url": "https://eks.eu-west-2.amazonaws.com" } }, "params": { + "Region": "eu-west-2", "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-1.amazonaws.com" + "url": "https://eks.eu-west-3.amazonaws.com" } }, "params": { + "Region": "eu-west-3", "UseFIPS": false, - "Region": 
"ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.ap-southeast-2.api.aws" + "url": "https://eks.me-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.ap-southeast-2.amazonaws.com" + "url": "https://eks.sa-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", + "Region": "sa-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-2.api.aws" + "url": "https://eks.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-2.amazonaws.com" + "url": "https://fips.eks.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-2", + "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack 
disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://eks.us-east-2.amazonaws.com" + } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://fips.eks.us-east-2.amazonaws.com" } }, "params": { + "Region": "us-east-2", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://eks.us-west-1.amazonaws.com" + } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-iso-east-1.c2s.ic.gov" + "url": "https://fips.eks.us-west-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-iso-east-1", + "Region": "us-west-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.ap-southeast-3.api.aws" + "url": "https://eks.us-west-2.amazonaws.com" } }, "params": { - 
"UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.ap-southeast-3.amazonaws.com" + "url": "https://fips.eks.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": true, - "Region": "ap-southeast-3", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-3.api.aws" + "url": "https://eks-fips.us-east-1.api.aws" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-3", + "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-3.amazonaws.com" + "url": "https://eks.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.ap-southeast-4.api.aws" + "url": "https://eks.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-4", - "UseDualStack": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + 
"documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.ap-southeast-4.amazonaws.com" + "url": "https://eks.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-4", + "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-4.api.aws" + "url": "https://eks-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-4", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-4.amazonaws.com" + "url": "https://eks-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-4", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://eks-fips.us-east-1.api.aws" + "url": "https://eks.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "us-east-1", + "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.us-east-1.amazonaws.com" + "url": 
"https://eks.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-east-1", + "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-east-1.api.aws" + "url": "https://eks.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-east-1.amazonaws.com" + "url": "https://eks.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.us-east-2.api.aws" + "url": "https://eks.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fips.eks.us-east-2.amazonaws.com" + "url": "https://eks-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and 
DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://eks.us-east-2.api.aws" + "url": "https://eks.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-east-2.amazonaws.com" + "url": "https://eks.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://eks-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://eks-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://eks.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this 
partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.cn-northwest-1.amazonaws.com.cn" + "url": "https://eks.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": false } }, @@ -1666,8 +574,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -1679,8 +587,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -1690,26 +598,27 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://eks.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-isob-east-1", - "UseDualStack": false + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -1717,7 +626,6 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, "Endpoint": 
"https://example.com" } @@ -1728,8 +636,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1740,11 +648,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/eks/src/main/resources/codegen-resources/service-2.json b/services/eks/src/main/resources/codegen-resources/service-2.json index 1c0342872825..4452bcc29fe7 100644 --- a/services/eks/src/main/resources/codegen-resources/service-2.json +++ b/services/eks/src/main/resources/codegen-resources/service-2.json @@ -2669,6 +2669,7 @@ "Ec2SubnetNotFound", "Ec2SubnetInvalidConfiguration", "IamInstanceProfileNotFound", + "Ec2SubnetMissingIpv6Assignment", "IamLimitExceeded", "IamNodeRoleNotFound", "NodeCreationFailure", @@ -2678,7 +2679,20 @@ "AccessDenied", "InternalFailure", "ClusterUnreachable", - "Ec2SubnetMissingIpv6Assignment" + "AmiIdNotFound", + "AutoScalingGroupOptInRequired", + "AutoScalingGroupRateLimitExceeded", + "Ec2LaunchTemplateDeletionFailure", + "Ec2LaunchTemplateInvalidConfiguration", + "Ec2LaunchTemplateMaxLimitExceeded", + "Ec2SubnetListTooLong", + "IamThrottling", + "NodeTerminationFailure", + "PodEvictionFailure", + "SourceEc2LaunchTemplateNotFound", + "LimitExceeded", + "Unknown", + "AutoScalingGroupInstanceRefreshActive" ] }, "NodegroupResources":{ From b7334e08eedf5b3036c219dc9e2568cb6956d96a Mon Sep 17 00:00:00 2001 From: AWS <> Date: Thu, 27 Jul 2023 18:12:39 +0000 Subject: [PATCH 052/270] Release 2.20.113. Updated CHANGELOG.md, README.md and all pom.xml. 
--- .changes/2.20.113.json | 36 +++++++++++++++++++ ...ature-AmazonElasticBlockStore-fedaae5.json | 6 ---- ...ure-AmazonElasticComputeCloud-dd4dfa9.json | 6 ---- ...mazonElasticKubernetesService-64381c0.json | 6 ---- ...eature-AmazonSageMakerService-6b80751.json | 6 ---- .../feature-AutoScaling-b2e97c5.json | 6 ---- CHANGELOG.md | 23 +++++++++++- README.md | 8 ++--- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 2 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 
2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- services/cloudtraildata/pom.xml | 2 +- 
services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 +- services/eks/pom.xml | 2 +- 
services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- 
services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- 
services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 2 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- 
services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- 
services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- 424 files changed, 478 insertions(+), 451 deletions(-) create mode 100644 .changes/2.20.113.json delete mode 100644 .changes/next-release/feature-AmazonElasticBlockStore-fedaae5.json delete mode 100644 .changes/next-release/feature-AmazonElasticComputeCloud-dd4dfa9.json delete mode 100644 .changes/next-release/feature-AmazonElasticKubernetesService-64381c0.json delete mode 100644 .changes/next-release/feature-AmazonSageMakerService-6b80751.json delete mode 100644 
.changes/next-release/feature-AutoScaling-b2e97c5.json diff --git a/.changes/2.20.113.json b/.changes/2.20.113.json new file mode 100644 index 000000000000..fc5fa7c5ce3a --- /dev/null +++ b/.changes/2.20.113.json @@ -0,0 +1,36 @@ +{ + "version": "2.20.113", + "date": "2023-07-27", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Block Store", + "contributor": "", + "description": "SDK and documentation updates for Amazon Elastic Block Store API" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "SDK and documentation updates for Amazon Elastic Block Store APIs" + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "contributor": "", + "description": "Add multiple customer error code to handle customer caused failure when managing EKS node groups" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Expose ProfilerConfig attribute in SageMaker Search API response." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "This release updates validation for instance types used in the AllowedInstanceTypes and ExcludedInstanceTypes parameters of the InstanceRequirements property of a MixedInstancesPolicy." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AmazonElasticBlockStore-fedaae5.json b/.changes/next-release/feature-AmazonElasticBlockStore-fedaae5.json deleted file mode 100644 index 0a27ae293c21..000000000000 --- a/.changes/next-release/feature-AmazonElasticBlockStore-fedaae5.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Elastic Block Store", - "contributor": "", - "description": "SDK and documentation updates for Amazon Elastic Block Store API" -} diff --git a/.changes/next-release/feature-AmazonElasticComputeCloud-dd4dfa9.json b/.changes/next-release/feature-AmazonElasticComputeCloud-dd4dfa9.json deleted file mode 100644 index 67409026da7f..000000000000 --- a/.changes/next-release/feature-AmazonElasticComputeCloud-dd4dfa9.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Elastic Compute Cloud", - "contributor": "", - "description": "SDK and documentation updates for Amazon Elastic Block Store APIs" -} diff --git a/.changes/next-release/feature-AmazonElasticKubernetesService-64381c0.json b/.changes/next-release/feature-AmazonElasticKubernetesService-64381c0.json deleted file mode 100644 index 75e2f10174e8..000000000000 --- a/.changes/next-release/feature-AmazonElasticKubernetesService-64381c0.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Elastic Kubernetes Service", - "contributor": "", - "description": "Add multiple customer error code to handle customer caused failure when managing EKS node groups" -} diff --git a/.changes/next-release/feature-AmazonSageMakerService-6b80751.json b/.changes/next-release/feature-AmazonSageMakerService-6b80751.json deleted file mode 100644 index e200e5818843..000000000000 --- a/.changes/next-release/feature-AmazonSageMakerService-6b80751.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon SageMaker Service", - "contributor": "", - "description": "Expose ProfilerConfig 
attribute in SageMaker Search API response." -} diff --git a/.changes/next-release/feature-AutoScaling-b2e97c5.json b/.changes/next-release/feature-AutoScaling-b2e97c5.json deleted file mode 100644 index bf3d652d8354..000000000000 --- a/.changes/next-release/feature-AutoScaling-b2e97c5.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Auto Scaling", - "contributor": "", - "description": "This release updates validation for instance types used in the AllowedInstanceTypes and ExcludedInstanceTypes parameters of the InstanceRequirements property of a MixedInstancesPolicy." -} diff --git a/CHANGELOG.md b/CHANGELOG.md index 697b636fc835..22952b6f12d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +# __2.20.113__ __2023-07-27__ +## __Amazon Elastic Block Store__ + - ### Features + - SDK and documentation updates for Amazon Elastic Block Store API + +## __Amazon Elastic Compute Cloud__ + - ### Features + - SDK and documentation updates for Amazon Elastic Block Store APIs + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Add multiple customer error code to handle customer caused failure when managing EKS node groups + +## __Amazon SageMaker Service__ + - ### Features + - Expose ProfilerConfig attribute in SageMaker Search API response. + +## __Auto Scaling__ + - ### Features + - This release updates validation for instance types used in the AllowedInstanceTypes and ExcludedInstanceTypes parameters of the InstanceRequirements property of a MixedInstancesPolicy. 
+ # __2.20.112__ __2023-07-26__ ## __AWS Cloud Control API__ - ### Features @@ -836,7 +857,7 @@ Special thanks to the following contributors to this release: ## __Contributors__ Special thanks to the following contributors to this release: -[@bmaizels](https://github.com/bmaizels), [@breader124](https://github.com/breader124) +[@breader124](https://github.com/breader124), [@bmaizels](https://github.com/bmaizels) # __2.20.85__ __2023-06-13__ ## __AWS CloudTrail__ - ### Features diff --git a/README.md b/README.md index 395ce77cdf67..15b0308c7bc7 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same verCancels a specified message movement task.
A message movement can only be cancelled when the current status is RUNNING.
Cancelling a message movement task does not revert the messages that have already been moved. It can only stop the messages that have not been moved yet.
Cancels a specified message movement task. A message movement can only be cancelled when the current status is RUNNING. Cancelling a message movement task does not revert the messages that have already been moved. It can only stop the messages that have not been moved yet.
This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.
Currently, only standard queues are supported.
Only one active message movement task is supported per queue at any given time.
Gets the most recent message movement tasks (up to 10) under a specific source queue.
" + "documentation":"Gets the most recent message movement tasks (up to 10) under a specific source queue.
This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.
Currently, only standard queues are supported.
Only one active message movement task is supported per queue at any given time.
Deletes the messages in a queue specified by the QueueURL parameter.
When you use the PurgeQueue action, you can't retrieve any messages deleted from a queue.
The message deletion process takes up to 60 seconds. We recommend waiting for 60 seconds regardless of your queue's size.
Messages sent to the queue before you call PurgeQueue might be received but are deleted within the next minute.
Messages sent to the queue after you call PurgeQueue might be deleted while the queue is being purged.
Deletes available messages in a queue (including in-flight messages) specified by the QueueURL parameter.
When you use the PurgeQueue action, you can't retrieve any messages deleted from a queue.
The message deletion process takes up to 60 seconds. We recommend waiting for 60 seconds regardless of your queue's size.
Messages sent to the queue before you call PurgeQueue might be received but are deleted within the next minute.
Messages sent to the queue after you call PurgeQueue might be deleted while the queue is being purged.
Starts an asynchronous task to move messages from a specified source queue to a specified destination queue.
This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.
Currently, only standard queues are supported.
Only one active message movement task is supported per queue at any given time.
Starts an asynchronous task to move messages from a specified source queue to a specified destination queue.
This action is currently limited to supporting message redrive from queues that are configured as dead-letter queues (DLQs) of other Amazon SQS queues only. Non-SQS queue sources of dead-letter queues, such as Lambda or Amazon SNS topics, are currently not supported.
In dead-letter queues redrive context, the StartMessageMoveTask the source queue is the DLQ, while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.
Currently, only standard queues support redrive. FIFO queues don't support redrive.
Only one active message movement task is supported per queue at any given time.
The ARN of the queue that contains the messages to be moved to another queue. Currently, only dead-letter queue (DLQ) ARNs are accepted.
" + "documentation":"The ARN of the queue that contains the messages to be moved to another queue. Currently, only ARNs of dead-letter queues (DLQs) whose sources are other Amazon SQS queues are accepted. DLQs whose sources are non-SQS queues, such as Lambda or Amazon SNS topics, are not currently supported.
" }, "DestinationArn":{ "shape":"String", From a1f8389fc4a2e7dc03fc3e6f965e081f2aea5da9 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 28 Jul 2023 00:23:46 +0000 Subject: [PATCH 056/270] Release 2.20.114. Updated CHANGELOG.md, README.md and all pom.xml. --- .changes/2.20.114.json | 12 ++++++++++++ .../feature-AmazonSimpleQueueService-80b9fff.json | 6 ------ CHANGELOG.md | 5 +++++ README.md | 8 ++++---- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 2 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 
2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- 
services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 
+- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- 
services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- services/licensemanagerlinuxsubscriptions/pom.xml | 2 +- services/licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 
+- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 2 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- 
services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- services/serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- 
services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../third-party-jackson-dataformat-cbor/pom.xml | 2 +- utils/pom.xml | 2 +- 420 files changed, 437 insertions(+), 426 deletions(-) create mode 100644 .changes/2.20.114.json delete mode 100644 .changes/next-release/feature-AmazonSimpleQueueService-80b9fff.json diff --git a/.changes/2.20.114.json b/.changes/2.20.114.json new file mode 100644 index 000000000000..d03a88e10779 --- /dev/null +++ b/.changes/2.20.114.json @@ -0,0 +1,12 @@ +{ + "version": "2.20.114", + "date": 
"2023-07-27", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Queue Service", + "contributor": "", + "description": "Documentation changes related to SQS APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AmazonSimpleQueueService-80b9fff.json b/.changes/next-release/feature-AmazonSimpleQueueService-80b9fff.json deleted file mode 100644 index 530db3258a10..000000000000 --- a/.changes/next-release/feature-AmazonSimpleQueueService-80b9fff.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Simple Queue Service", - "contributor": "", - "description": "Documentation changes related to SQS APIs." -} diff --git a/CHANGELOG.md b/CHANGELOG.md index 22952b6f12d7..040402f8ab15 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# __2.20.114__ __2023-07-27__ +## __Amazon Simple Queue Service__ + - ### Features + - Documentation changes related to SQS APIs. + # __2.20.113__ __2023-07-27__ ## __Amazon Elastic Block Store__ - ### Features diff --git a/README.md b/README.md index 15b0308c7bc7..0fe67dac38e8 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same verReturns a description of the cluster operation specified by the ARN.
\n " }, + "DescribeClusterOperationV2" : { + "name" : "DescribeClusterOperationV2", + "http" : { + "method" : "GET", + "requestUri" : "/api/v2/operations/{clusterOperationArn}", + "responseCode" : 200 + }, + "input" : { + "shape" : "DescribeClusterOperationV2Request" + }, + "output" : { + "shape" : "DescribeClusterOperationV2Response", + "documentation" : "\nHTTP Status Code 200: OK.
" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "\nHTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.
" + }, { + "shape" : "UnauthorizedException", + "documentation" : "\nHTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.
" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "\nHTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.
" + }, { + "shape" : "ForbiddenException", + "documentation" : "\nHTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.
" + }, { + "shape" : "NotFoundException", + "documentation" : "\nHTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.
" + }, { + "shape" : "ServiceUnavailableException", + "documentation" : "\nHTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.
" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "\nHTTP Status Code 429: Limit exceeded. Resource limit reached.
" + } ], + "documentation" : "\nReturns a description of the cluster operation specified by the ARN.
\n" + }, "DescribeConfiguration": { "name": "DescribeConfiguration", "http": { @@ -795,6 +833,44 @@ ], "documentation": "\nReturns a list of all the operations that have been performed on the specified MSK cluster.
\n " }, + "ListClusterOperationsV2" : { + "name" : "ListClusterOperationsV2", + "http" : { + "method" : "GET", + "requestUri" : "/api/v2/clusters/{clusterArn}/operations", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListClusterOperationsV2Request" + }, + "output" : { + "shape" : "ListClusterOperationsV2Response", + "documentation" : "\nHTTP Status Code 200: OK.
" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "\nHTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.
" + }, { + "shape" : "UnauthorizedException", + "documentation" : "\nHTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.
" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "\nHTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.
" + }, { + "shape" : "ForbiddenException", + "documentation" : "\nHTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.
" + }, { + "shape" : "NotFoundException", + "documentation" : "\nHTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.
" + }, { + "shape" : "ServiceUnavailableException", + "documentation" : "\nHTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.
" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "\nHTTP Status Code 429: Limit exceeded. Resource limit reached.
" + } ], + "documentation" : "\nReturns a list of all the operations that have been performed on the specified MSK cluster.
\n " + }, "ListClusters": { "name": "ListClusters", "http": { @@ -2977,6 +3053,140 @@ } } }, + "ClusterOperationV2" : { + "type" : "structure", + "members" : { + "ClusterArn" : { + "shape" : "__string", + "locationName" : "clusterArn", + "documentation" : "\nARN of the cluster.
" + }, + "ClusterType" : { + "shape" : "ClusterType", + "locationName" : "clusterType", + "documentation" : "\nType of the backend cluster.
" + }, + "StartTime" : { + "shape" : "__timestampIso8601", + "locationName" : "startTime", + "documentation" : "\nThe time at which operation was started.
" + }, + "EndTime" : { + "shape" : "__timestampIso8601", + "locationName" : "endTime", + "documentation" : "\nThe time at which the operation finished.
" + }, + "ErrorInfo" : { + "shape" : "ErrorInfo", + "locationName" : "errorInfo", + "documentation" : "\nIf cluster operation failed from an error, it describes the error.
" + }, + "OperationArn" : { + "shape" : "__string", + "locationName" : "operationArn", + "documentation" : "\nARN of the cluster operation.
" + }, + "OperationState" : { + "shape" : "__string", + "locationName" : "operationState", + "documentation" : "\nState of the cluster operation.
" + }, + "OperationType" : { + "shape" : "__string", + "locationName" : "operationType", + "documentation" : "\nType of the cluster operation.
" + }, + "Provisioned" : { + "shape" : "ClusterOperationV2Provisioned", + "locationName" : "provisioned", + "documentation" : "\nProperties of a provisioned cluster.
" + }, + "Serverless" : { + "shape" : "ClusterOperationV2Serverless", + "locationName" : "serverless", + "documentation" : "\nProperties of a serverless cluster.
" + } + }, + "documentation" : "\nReturns information about a cluster operation.
" + }, + "ClusterOperationV2Provisioned" : { + "type" : "structure", + "members" : { + "OperationSteps" : { + "shape" : "__listOfClusterOperationStep", + "locationName" : "operationSteps", + "documentation" : "\nSteps completed during the operation.
" + }, + "SourceClusterInfo" : { + "shape" : "MutableClusterInfo", + "locationName" : "sourceClusterInfo", + "documentation" : "\nInformation about cluster attributes before a cluster is updated.
" + }, + "TargetClusterInfo" : { + "shape" : "MutableClusterInfo", + "locationName" : "targetClusterInfo", + "documentation" : "\nInformation about cluster attributes after a cluster is updated.
" + }, + "VpcConnectionInfo" : { + "shape" : "VpcConnectionInfo", + "locationName" : "vpcConnectionInfo", + "documentation" : "\nDescription of the VPC connection for CreateVpcConnection and DeleteVpcConnection operations.
" + } + }, + "documentation" : "\nReturns information about a provisioned cluster operation.
" + }, + "ClusterOperationV2Serverless" : { + "type" : "structure", + "members" : { + "VpcConnectionInfo" : { + "shape" : "VpcConnectionInfoServerless", + "locationName" : "vpcConnectionInfo", + "documentation" : "\nDescription of the VPC connection for CreateVpcConnection and DeleteVpcConnection operations.
" + } + }, + "documentation" : "\nReturns information about a serverless cluster operation.
" + }, + "ClusterOperationV2Summary" : { + "type" : "structure", + "members" : { + "ClusterArn" : { + "shape" : "__string", + "locationName" : "clusterArn", + "documentation" : "\nARN of the cluster.
" + }, + "ClusterType" : { + "shape" : "ClusterType", + "locationName" : "clusterType", + "documentation" : "\nType of the backend cluster.
" + }, + "StartTime" : { + "shape" : "__timestampIso8601", + "locationName" : "startTime", + "documentation" : "\nThe time at which operation was started.
" + }, + "EndTime" : { + "shape" : "__timestampIso8601", + "locationName" : "endTime", + "documentation" : "\nThe time at which the operation finished.
" + }, + "OperationArn" : { + "shape" : "__string", + "locationName" : "operationArn", + "documentation" : "\nARN of the cluster operation.
" + }, + "OperationState" : { + "shape" : "__string", + "locationName" : "operationState", + "documentation" : "\nState of the cluster operation.
" + }, + "OperationType" : { + "shape" : "__string", + "locationName" : "operationType", + "documentation" : "\nType of the cluster operation.
" + } + }, + "documentation" : "\nReturns information about a cluster operation.
" + }, "DeleteClusterRequest": { "type": "structure", "members": { @@ -3103,6 +3313,18 @@ "ClusterOperationArn" ] }, + "DescribeClusterOperationV2Request" : { + "type" : "structure", + "members" : { + "ClusterOperationArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "clusterOperationArn", + "documentation" : "ARN of the cluster operation to describe." + } + }, + "required" : [ "ClusterOperationArn" ] + }, "DescribeClusterOperationResponse": { "type": "structure", "members": { @@ -3113,6 +3335,16 @@ } } }, + "DescribeClusterOperationV2Response" : { + "type" : "structure", + "members" : { + "ClusterOperationInfo" : { + "shape" : "ClusterOperationV2", + "locationName" : "clusterOperationInfo", + "documentation" : "\nCluster operation information
" + } + } + }, "DescribeClusterRequest": { "type": "structure", "members": { @@ -3690,6 +3922,30 @@ "ClusterArn" ] }, + "ListClusterOperationsV2Request" : { + "type" : "structure", + "members" : { + "ClusterArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "clusterArn", + "documentation" : "The arn of the cluster whose operations are being requested." + }, + "MaxResults" : { + "shape" : "MaxResults", + "location" : "querystring", + "locationName" : "maxResults", + "documentation" : "The maxResults of the query." + }, + "NextToken" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "nextToken", + "documentation" : "The nextToken of the query." + } + }, + "required" : [ "ClusterArn" ] + }, "ListClusterOperationsResponse": { "type": "structure", "members": { @@ -3705,6 +3961,21 @@ } } }, + "ListClusterOperationsV2Response" : { + "type" : "structure", + "members" : { + "ClusterOperationInfoList" : { + "shape" : "__listOfClusterOperationV2Summary", + "locationName" : "clusterOperationInfoList", + "documentation" : "\nAn array of cluster operation information objects.
" + }, + "NextToken" : { + "shape" : "__string", + "locationName" : "nextToken", + "documentation" : "\nIf the response of ListClusterOperationsV2 is truncated, it returns a NextToken in the response. This NextToken should be sent in the subsequent request to ListClusterOperationsV2.
" + } + } + }, "ListClustersRequest": { "type": "structure", "members": { @@ -5195,6 +5466,32 @@ }, "documentation": "\nDescription of the VPC connection.
\n " }, + "VpcConnectionInfoServerless" : { + "type" : "structure", + "members" : { + "CreationTime" : { + "shape" : "__timestampIso8601", + "locationName" : "creationTime", + "documentation" : "\nThe time when Amazon MSK creates the VPC Connnection.
" + }, + "Owner" : { + "shape" : "__string", + "locationName" : "owner", + "documentation" : "\nThe owner of the VPC Connection.
" + }, + "UserIdentity" : { + "shape" : "UserIdentity", + "locationName" : "userIdentity", + "documentation" : "\nDescription of the requester that calls the API operation.
" + }, + "VpcConnectionArn" : { + "shape" : "__string", + "locationName" : "vpcConnectionArn", + "documentation" : "\nThe Amazon Resource Name (ARN) of the VPC connection.
" + } + }, + "documentation" : "Description of the VPC connection." + }, "VpcConnectionState": { "type": "string", "documentation": "\nThe state of a VPC connection.
\n ", @@ -5297,6 +5594,12 @@ "shape": "ClusterOperationInfo" } }, + "__listOfClusterOperationV2Summary" : { + "type" : "list", + "member" : { + "shape" : "ClusterOperationV2Summary" + } + }, "__listOfClusterOperationStep" : { "type" : "list", "member" : { From 6a0c2bee989ba767b0bb10bc03869d0476d86b61 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 28 Jul 2023 18:10:01 +0000 Subject: [PATCH 059/270] Amazon CloudFront Update: Add a new JavaScript runtime version for CloudFront Functions. --- .../feature-AmazonCloudFront-8a88fc0.json | 6 + .../codegen-resources/endpoint-rule-set.json | 538 +++--------------- .../codegen-resources/endpoint-tests.json | 134 +++-- .../codegen-resources/service-2.json | 26 +- 4 files changed, 199 insertions(+), 505 deletions(-) create mode 100644 .changes/next-release/feature-AmazonCloudFront-8a88fc0.json diff --git a/.changes/next-release/feature-AmazonCloudFront-8a88fc0.json b/.changes/next-release/feature-AmazonCloudFront-8a88fc0.json new file mode 100644 index 000000000000..8fe2453c9ea0 --- /dev/null +++ b/.changes/next-release/feature-AmazonCloudFront-8a88fc0.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon CloudFront", + "contributor": "", + "description": "Add a new JavaScript runtime version for CloudFront Functions." 
+} diff --git a/services/cloudfront/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudfront/src/main/resources/codegen-resources/endpoint-rule-set.json index 2d908ad292a4..07a287ff6fef 100644 --- a/services/cloudfront/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudfront/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -138,216 +138,91 @@ }, "aws" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + false + ] + } + ], + "endpoint": { + "url": "https://cloudfront.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "cloudfront", + "signingRegion": "us-east-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "ref": "UseFIPS" + "ref": "PartitionResult" }, - true - ] - } - ], - "type": "tree", - "rules": [ - 
{ - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } + "name" ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + "aws" ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } + true ] }, { - "conditions": [], - "endpoint": { - "url": "https://cloudfront.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "us-east-1" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, - "headers": {} - }, - "type": "endpoint" + false + ] } - ] + ], + "endpoint": { + "url": "https://cloudfront-fips.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "cloudfront", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -365,208 +240,40 @@ }, "aws-cn" ] - } - ], - 
"type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront-fips.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront-fips.{Region}.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, 
- "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + false + ] + } + ], + "endpoint": { + "url": "https://cloudfront.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "cloudfront", + "signingRegion": "cn-northwest-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -688,33 +395,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://cloudfront-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { @@ -797,60 +477,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://cloudfront.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } 
- ], - "endpoint": { - "url": "https://cloudfront.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/services/cloudfront/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudfront/src/main/resources/codegen-resources/endpoint-tests.json index 736cf87722f2..0f37f3195b07 100644 --- a/services/cloudfront/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudfront/src/main/resources/codegen-resources/endpoint-tests.json @@ -18,8 +18,8 @@ }, "params": { "Region": "aws-global", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -40,8 +40,8 @@ }, "params": { "Region": "aws-global", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -53,8 +53,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -75,8 +75,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -88,8 +88,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -110,8 +110,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -132,8 +132,8 @@ }, "params": { "Region": "aws-cn-global", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -145,8 +145,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -158,8 +158,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + 
"UseFIPS": true, + "UseDualStack": false } }, { @@ -171,8 +171,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -193,8 +193,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -206,8 +206,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -219,8 +219,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -232,8 +232,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -245,8 +245,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -258,8 +269,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -271,8 +293,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS 
and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -284,8 +317,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -297,8 +341,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -310,8 +354,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -323,8 +367,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -335,8 +379,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -347,10 +391,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cloudfront/src/main/resources/codegen-resources/service-2.json b/services/cloudfront/src/main/resources/codegen-resources/service-2.json index 1ea9d33492cb..e2300702ae66 100644 --- a/services/cloudfront/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudfront/src/main/resources/codegen-resources/service-2.json @@ -97,6 +97,7 @@ {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"}, 
{"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, + {"shape":"TooManyDistributionsAssociatedToOriginAccessControl"}, {"shape":"NoSuchResponseHeadersPolicy"}, {"shape":"TooManyDistributionsAssociatedToResponseHeadersPolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, @@ -107,7 +108,7 @@ {"shape":"NoSuchRealtimeLogConfig"}, {"shape":"RealtimeLogConfigOwnerMismatch"} ], - "documentation":"Creates a staging distribution using the configuration of the provided primary distribution. A staging distribution is a copy of an existing distribution (called the primary distribution) that you can use in a continuous deployment workflow.
After you create a staging distribution, you can use UpdateDistribution to modify the staging distribution's configuration. Then you can use CreateContinuousDeploymentPolicy to incrementally move traffic to the staging distribution.
Creates a staging distribution using the configuration of the provided primary distribution. A staging distribution is a copy of an existing distribution (called the primary distribution) that you can use in a continuous deployment workflow.
After you create a staging distribution, you can use UpdateDistribution to modify the staging distribution's configuration. Then you can use CreateContinuousDeploymentPolicy to incrementally move traffic to the staging distribution.
This API operation requires the following IAM permissions:
" }, "CreateCachePolicy":{ "name":"CreateCachePolicy2020_05_31", @@ -260,6 +261,7 @@ {"shape":"InvalidOrigin"}, {"shape":"InvalidOriginAccessIdentity"}, {"shape":"InvalidOriginAccessControl"}, + {"shape":"IllegalOriginAccessConfiguration"}, {"shape":"AccessDenied"}, {"shape":"TooManyTrustedSigners"}, {"shape":"TrustedSignerDoesNotExist"}, @@ -307,6 +309,7 @@ {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"}, {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, + {"shape":"TooManyDistributionsAssociatedToOriginAccessControl"}, {"shape":"NoSuchResponseHeadersPolicy"}, {"shape":"TooManyDistributionsAssociatedToResponseHeadersPolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, @@ -320,7 +323,7 @@ {"shape":"NoSuchContinuousDeploymentPolicy"}, {"shape":"InvalidDomainNameForOriginAccessControl"} ], - "documentation":"Create a new distribution with tags.
" + "documentation":"Create a new distribution with tags. This API operation requires the following IAM permissions:
" }, "CreateFieldLevelEncryptionConfig":{ "name":"CreateFieldLevelEncryptionConfig2020_05_31", @@ -1780,6 +1783,7 @@ {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"}, {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, + {"shape":"TooManyDistributionsAssociatedToOriginAccessControl"}, {"shape":"NoSuchResponseHeadersPolicy"}, {"shape":"TooManyDistributionsAssociatedToResponseHeadersPolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, @@ -1857,6 +1861,7 @@ {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"}, {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, + {"shape":"TooManyDistributionsAssociatedToOriginAccessControl"}, {"shape":"NoSuchResponseHeadersPolicy"}, {"shape":"TooManyDistributionsAssociatedToResponseHeadersPolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, @@ -1867,7 +1872,7 @@ {"shape":"NoSuchRealtimeLogConfig"}, {"shape":"RealtimeLogConfigOwnerMismatch"} ], - "documentation":"Copies the staging distribution's configuration to its corresponding primary distribution. The primary distribution retains its Aliases (also known as alternate domain names or CNAMEs) and ContinuousDeploymentPolicyId value, but otherwise its configuration is overwritten to match the staging distribution.
You can use this operation in a continuous deployment workflow after you have tested configuration changes on the staging distribution. After using a continuous deployment policy to move a portion of your domain name's traffic to the staging distribution and verifying that it works as intended, you can use this operation to copy the staging distribution's configuration to the primary distribution. This action will disable the continuous deployment policy and move your domain's traffic back to the primary distribution.
" + "documentation":"Copies the staging distribution's configuration to its corresponding primary distribution. The primary distribution retains its Aliases (also known as alternate domain names or CNAMEs) and ContinuousDeploymentPolicyId value, but otherwise its configuration is overwritten to match the staging distribution.
You can use this operation in a continuous deployment workflow after you have tested configuration changes on the staging distribution. After using a continuous deployment policy to move a portion of your domain name's traffic to the staging distribution and verifying that it works as intended, you can use this operation to copy the staging distribution's configuration to the primary distribution. This action will disable the continuous deployment policy and move your domain's traffic back to the primary distribution.
This API operation requires the following IAM permissions:
" }, "UpdateFieldLevelEncryptionConfig":{ "name":"UpdateFieldLevelEncryptionConfig2020_05_31", @@ -2155,7 +2160,7 @@ "members":{ "Enabled":{ "shape":"boolean", - "documentation":"This field is true if any of the Amazon Web Services accounts in the list have active CloudFront key pairs that CloudFront can use to verify the signatures of signed URLs and signed cookies. If not, this field is false.
This field is true if any of the Amazon Web Services accounts in the list are configured as trusted signers. If not, this field is false.
A value that uniquely identifies a request to create a resource. This helps to prevent CloudFront from creating a duplicate resource if you accidentally resubmit an identical request.
" + }, + "Enabled":{ + "shape":"boolean", + "documentation":"A Boolean flag to specify the state of the staging distribution when it's created. When you set this value to True, the staging distribution is enabled. When you set this value to False, the staging distribution is disabled.
If you omit this field, the default value is True.
The function's runtime environment. The only valid value is cloudfront-js-1.0.
The function's runtime environment verion.
" } }, "documentation":"Contains configuration information about a CloudFront function.
" @@ -5156,7 +5165,10 @@ }, "FunctionRuntime":{ "type":"string", - "enum":["cloudfront-js-1.0"] + "enum":[ + "cloudfront-js-1.0", + "cloudfront-js-2.0" + ] }, "FunctionSizeLimitExceeded":{ "type":"structure", @@ -10342,7 +10354,7 @@ "members":{ "Enabled":{ "shape":"boolean", - "documentation":"This field is true if any of the Amazon Web Services accounts have public keys that CloudFront can use to verify the signatures of signed URLs and signed cookies. If not, this field is false.
This field is true if any of the Amazon Web Services accounts in the list are configured as trusted signers. If not, this field is false.
Updates an existing message template for messages that are sent through the voice channel.
" - }, - "VerifyOTPMessage": { - "name": "VerifyOTPMessage", - "http": { - "method": "POST", - "requestUri": "/v1/apps/{application-id}/verify-otp", - "responseCode": 200 - }, - "input": { - "shape": "VerifyOTPMessageRequest" - }, - "output": { - "shape": "VerifyOTPMessageResponse", - "documentation": "200 response
" - }, - "errors": [ - { - "shape": "BadRequestException", - "documentation": "400 response
" - }, - { - "shape": "InternalServerErrorException", - "documentation": "500 response
" - }, - { - "shape": "PayloadTooLargeException", - "documentation": "413 response
" - }, - { - "shape": "ForbiddenException", - "documentation": "403 response
" - }, - { - "shape": "NotFoundException", - "documentation": "404 response
" - }, - { - "shape": "MethodNotAllowedException", - "documentation": "405 response
" - }, - { - "shape": "TooManyRequestsException", - "documentation": "429 response
" - } - ], - "documentation": "Verify an OTP
" + }, + "VerifyOTPMessage": { + "name": "VerifyOTPMessage", + "http": { + "method": "POST", + "requestUri": "/v1/apps/{application-id}/verify-otp", + "responseCode": 200 + }, + "input": { + "shape": "VerifyOTPMessageRequest" + }, + "output": { + "shape": "VerifyOTPMessageResponse", + "documentation": "200 response
" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "400 response
" + }, + { + "shape": "InternalServerErrorException", + "documentation": "500 response
" + }, + { + "shape": "PayloadTooLargeException", + "documentation": "413 response
" + }, + { + "shape": "ForbiddenException", + "documentation": "403 response
" + }, + { + "shape": "NotFoundException", + "documentation": "404 response
" + }, + { + "shape": "MethodNotAllowedException", + "documentation": "405 response
" + }, + { + "shape": "TooManyRequestsException", + "documentation": "429 response
" + } + ], + "documentation": "Verify an OTP
" } }, "shapes": { @@ -6411,6 +6411,24 @@ "Name" ] }, + "ApplicationSettingsJourneyLimits": { + "type": "structure", + "members": { + "DailyCap": { + "shape": "__integer", + "documentation": "The daily number of messages that an endpoint can receive from all journeys. The maximum value is 100. If set to 0, this limit will not apply.
" + }, + "TimeframeCap": { + "shape": "JourneyTimeframeCap", + "documentation": "The default maximum number of messages that can be sent to an endpoint during the specified timeframe for all journeys.
" + }, + "TotalCap": { + "shape": "__integer", + "documentation": "The default maximum number of messages that a single journey can sent to a single endpoint. The maximum value is 100. If set to 0, this limit will not apply.
" + } + }, + "documentation": "The default sending limits for journeys in the application. To override these limits and define custom limits for a specific journey, use the Journey resource.
" + }, "ApplicationSettingsResource": { "type": "structure", "members": { @@ -6433,6 +6451,10 @@ "QuietTime": { "shape": "QuietTime", "documentation": "The default quiet time for campaigns in the application. Quiet time is a specific time range when messages aren't sent to endpoints, if all the following conditions are met:
The EndpointDemographic.Timezone property of the endpoint is set to a valid value.
The current time in the endpoint's time zone is later than or equal to the time specified by the QuietTime.Start property for the application (or a campaign or journey that has custom quiet time settings).
The current time in the endpoint's time zone is earlier than or equal to the time specified by the QuietTime.End property for the application (or a campaign or journey that has custom quiet time settings).
If any of the preceding conditions isn't met, the endpoint will receive messages from a campaign or journey, even if quiet time is enabled.
" + }, + "JourneyLimits": { + "shape": "ApplicationSettingsJourneyLimits", + "documentation": "The default sending limits for journeys in the application. These limits apply to each journey for the application but can be overridden, on a per journey basis, with the JourneyLimits resource.
" } }, "documentation": "Provides information about an application, including the default settings for an application.
", @@ -9484,15 +9506,20 @@ "shape": "__string", "documentation": "The Web API Key, also referred to as an API_KEY or server key, that you received from Google to communicate with Google services.
" }, + "DefaultAuthenticationMethod": { + "shape": "__string", + "documentation": "The default authentication method used for GCM. Values are either \"TOKEN\" or \"KEY\". Defaults to \"KEY\".
" + }, "Enabled": { "shape": "__boolean", "documentation": "Specifies whether to enable the GCM channel for the application.
" + }, + "ServiceJson": { + "shape": "__string", + "documentation": "The contents of the JSON file provided by Google during registration in order to generate an access token for authentication. For more information see Migrate from legacy FCM APIs to HTTP v1.
" } }, - "documentation": "Specifies the status and settings of the GCM channel for an application. This channel enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service.
", - "required": [ - "ApiKey" - ] + "documentation": "Specifies the status and settings of the GCM channel for an application. This channel enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service.
" }, "GCMChannelResponse": { "type": "structure", @@ -9509,6 +9536,10 @@ "shape": "__string", "documentation": "The Web API Key, also referred to as an API_KEY or server key, that you received from Google to communicate with Google services.
" }, + "DefaultAuthenticationMethod": { + "shape": "__string", + "documentation": "The default authentication method used for GCM. Values are either \"TOKEN\" or \"KEY\". Defaults to \"KEY\".
" + }, "Enabled": { "shape": "__boolean", "documentation": "Specifies whether the GCM channel is enabled for the application.
" @@ -9517,6 +9548,10 @@ "shape": "__boolean", "documentation": "(Not used) This property is retained only for backward compatibility.
" }, + "HasFcmServiceCredentials": { + "shape": "__boolean", + "documentation": "Returns true if the JSON file provided by Google during registration process was used in the ServiceJson field of the request.
" + }, "Id": { "shape": "__string", "documentation": "(Deprecated) An identifier for the GCM channel. This property is retained only for backward compatibility.
" @@ -9544,7 +9579,6 @@ }, "documentation": "Provides information about the status and settings of the GCM channel for an application. The GCM channel enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service.
", "required": [ - "Credential", "Platform" ] }, @@ -9579,9 +9613,13 @@ "shape": "__string", "documentation": "The URL of an image to display in the push notification.
" }, + "PreferredAuthenticationMethod": { + "shape": "__string", + "documentation": "The preferred authentication method, with valid values \"KEY\" or \"TOKEN\". If a value isn't provided then the DefaultAuthenticationMethod is used.
" + }, "Priority": { "shape": "__string", - "documentation": "para>normal - The notification might be delayed. Delivery is optimized for battery usage on the recipient's device. Use this value unless immediate delivery is required.
/listitem>high - The notification is sent immediately and might wake a sleeping device.
Amazon Pinpoint specifies this value in the FCM priority parameter when it sends the notification message to FCM.
The equivalent values for Apple Push Notification service (APNs) are 5, for normal, and 10, for high. If you specify an APNs value for this property, Amazon Pinpoint accepts and converts the value to the corresponding FCM value.
" + "documentation": "para>normal – The notification might be delayed. Delivery is optimized for battery usage on the recipient's device. Use this value unless immediate delivery is required.
/listitem>high – The notification is sent immediately and might wake a sleeping device.
Amazon Pinpoint specifies this value in the FCM priority parameter when it sends the notification message to FCM.
The equivalent values for Apple Push Notification service (APNs) are 5, for normal, and 10, for high. If you specify an APNs value for this property, Amazon Pinpoint accepts and converts the value to the corresponding FCM value.
" }, "RawContent": { "shape": "__string", @@ -12074,8 +12112,16 @@ "documentation": "The maximum number of messages that the journey can send each second.
" }, "EndpointReentryInterval": { - "shape": "__string", - "documentation": "Minimum time that must pass before an endpoint can re-enter a given journey. The duration should use an ISO 8601 format, such as PT1H.
" + "shape": "__string", + "documentation": "Minimum time that must pass before an endpoint can re-enter a given journey. The duration should use an ISO 8601 format, such as PT1H.
" + }, + "TimeframeCap": { + "shape": "JourneyTimeframeCap", + "documentation": "The number of messages that an endpoint can receive during the specified timeframe.
" + }, + "TotalCap": { + "shape": "__integer", + "documentation": "The maximum number of messages a journey can sent to a single endpoint. The maximum value is 100. If set to 0, this limit will not apply.
" } }, "documentation": "Specifies limits on the messages that a journey can send and the number of times participants can enter a journey.
" @@ -12092,16 +12138,16 @@ }, "JourneyChannelSettings": { "type": "structure", - "members": { - "ConnectCampaignArn": { - "shape": "__string", - "documentation": "Amazon Resource Name (ARN) of the Connect Campaign.
" - }, - "ConnectCampaignExecutionRoleArn": { - "shape": "__string", - "documentation": "IAM role ARN to be assumed when invoking Connect campaign execution APIs for dialing.
" - } + "members": { + "ConnectCampaignArn": { + "shape": "__string", + "documentation": "Amazon Resource Name (ARN) of the Connect Campaign.
" }, + "ConnectCampaignExecutionRoleArn": { + "shape": "__string", + "documentation": "IAM role ARN to be assumed when invoking Connect campaign execution APIs for dialing.
" + } + }, "documentation": "The channel-specific configurations for the journey.
" }, "JourneyResponse": { @@ -12181,16 +12227,16 @@ "documentation": "The channel-specific configurations for the journey.
" }, "SendingSchedule": { - "shape": "__boolean", - "documentation": "Indicates if journey has Advance Quiet Time enabled. This flag should be set to true in order to allow using OpenHours and ClosedDays.
" + "shape": "__boolean", + "documentation": "Indicates if journey has Advance Quiet Time enabled. This flag should be set to true in order to allow using OpenHours and ClosedDays.
" }, "OpenHours": { - "shape": "OpenHours", - "documentation": "The time when a journey can send messages. QuietTime should be configured first and SendingSchedule should be set to true.
" + "shape": "OpenHours", + "documentation": "The time when a journey can send messages. QuietTime should be configured first and SendingSchedule should be set to true.
" }, "ClosedDays": { - "shape": "ClosedDays", - "documentation": "The time when a journey will not send messages. QuietTime should be configured first and SendingSchedule should be set to true.
" + "shape": "ClosedDays", + "documentation": "The time when a journey will not send messages. QuietTime should be configured first and SendingSchedule should be set to true.
" }, "TimezoneEstimationMethods": { "shape": "ListOf__TimezoneEstimationMethodsElement", @@ -12388,6 +12434,20 @@ }, "documentation": "Changes the status of a journey.
" }, + "JourneyTimeframeCap": { + "type": "structure", + "members": { + "Cap": { + "shape": "__integer", + "documentation": "The maximum number of messages that all journeys can send to an endpoint during the specified timeframe. The maximum value is 100. If set to 0, this limit will not apply.
" + }, + "Days": { + "shape": "__integer", + "documentation": "The length of the timeframe in days. The maximum value is 30. If set to 0, this limit will not apply.
" + } + }, + "documentation": "The number of messages that can be sent to an endpoint during the specified timeframe for all journeys.
" + }, "JourneysResponse": { "type": "structure", "members": { @@ -14436,6 +14496,10 @@ "VoiceTemplate": { "shape": "Template", "documentation": "The voice template to use for the message. This object isn't supported for campaigns.
" + }, + "InAppTemplate": { + "shape": "Template", + "documentation": "The InApp template to use for the message. The InApp template object is not supported for SendMessages.
" } }, "documentation": "Specifies the message template to use for the message, for each type of channel.
" @@ -14492,7 +14556,7 @@ }, "TemplateType": { "shape": "TemplateType", - "documentation": "The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, and VOICE.
" + "documentation": "The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, INAPP, and VOICE.
" }, "Version": { "shape": "__string", @@ -14542,7 +14606,7 @@ }, "TemplateType": { "shape": "__string", - "documentation": "The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, and VOICE.
" + "documentation": "The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, INAPP, and VOICE.
" }, "Version": { "shape": "__string", @@ -15884,6 +15948,10 @@ "QuietTime": { "shape": "QuietTime", "documentation": "The default quiet time for campaigns in the application. Quiet time is a specific time range when messages aren't sent to endpoints, if all the following conditions are met:
The EndpointDemographic.Timezone property of the endpoint is set to a valid value.
The current time in the endpoint's time zone is later than or equal to the time specified by the QuietTime.Start property for the application (or a campaign or journey that has custom quiet time settings).
The current time in the endpoint's time zone is earlier than or equal to the time specified by the QuietTime.End property for the application (or a campaign or journey that has custom quiet time settings).
If any of the preceding conditions isn't met, the endpoint will receive messages from a campaign or journey, even if quiet time is enabled.
To override the default quiet time settings for a specific campaign or journey, use the Campaign resource or the Journey resource to define a custom quiet time for the campaign or journey.
" + }, + "JourneyLimits": { + "shape": "ApplicationSettingsJourneyLimits", + "documentation": "The default sending limits for journeys in the application. These limits apply to each journey for the application but can be overridden, on a per journey basis, with the JourneyLimits resource.
" } }, "documentation": "Specifies the default settings for an application.
" @@ -16033,28 +16101,28 @@ "documentation": "The status of the journey. Valid values are:
DRAFT - Saves the journey and doesn't publish it.
ACTIVE - Saves and publishes the journey. Depending on the journey's schedule, the journey starts running immediately or at the scheduled start time. If a journey's status is ACTIVE, you can't add, change, or remove activities from it.
PAUSED, CANCELLED, COMPLETED, and CLOSED states are not supported in requests to create or update a journey. To cancel, pause, or resume a journey, use the Journey State resource.
" }, "WaitForQuietTime": { - "shape": "__boolean", - "documentation": "Specifies whether endpoints in quiet hours should enter a wait till the end of their quiet hours.
" + "shape": "__boolean", + "documentation": "Specifies whether endpoints in quiet hours should enter a wait till the end of their quiet hours.
" }, "RefreshOnSegmentUpdate": { - "shape": "__boolean", - "documentation": "Indicates whether the journey participants should be refreshed when a segment is updated.
" + "shape": "__boolean", + "documentation": "Indicates whether the journey participants should be refreshed when a segment is updated.
" }, "JourneyChannelSettings": { - "shape": "JourneyChannelSettings", - "documentation": "The channel-specific configurations for the journey.
" + "shape": "JourneyChannelSettings", + "documentation": "The channel-specific configurations for the journey.
" }, "SendingSchedule": { - "shape": "__boolean", - "documentation": "Indicates if journey has Advance Quiet Time enabled. This flag should be set to true in order to allow using OpenHours and ClosedDays.
" + "shape": "__boolean", + "documentation": "Indicates if journey has Advance Quiet Time enabled. This flag should be set to true in order to allow using OpenHours and ClosedDays.
" }, "OpenHours": { - "shape": "OpenHours", - "documentation": "The time when journey allow to send messages. QuietTime should be configured first and SendingSchedule should be set to true.
" + "shape": "OpenHours", + "documentation": "The time when journey allow to send messages. QuietTime should be configured first and SendingSchedule should be set to true.
" }, "ClosedDays": { - "shape": "ClosedDays", - "documentation": "The time when journey will stop sending messages. QuietTime should be configured first and SendingSchedule should be set to true.
" + "shape": "ClosedDays", + "documentation": "The time when journey will stop sending messages. QuietTime should be configured first and SendingSchedule should be set to true.
" }, "TimezoneEstimationMethods": { "shape": "ListOf__TimezoneEstimationMethodsElement", @@ -16496,124 +16564,124 @@ "timestampFormat": "unixTimestamp" }, "DayOfWeek": { - "type": "string", - "enum": [ - "MONDAY", - "TUESDAY", - "WEDNESDAY", - "THURSDAY", - "FRIDAY", - "SATURDAY", - "SUNDAY" - ] + "type": "string", + "enum": [ + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ] }, "OpenHoursRule": { - "type": "structure", - "documentation": "Specifies the start and end time for OpenHours.
", - "members": { - "StartTime": { - "shape": "__string", - "documentation": "The start of the scheduled time, in ISO 8601 format, when the channel can send messages.
" - }, - "EndTime": { - "shape": "__string", - "documentation": "The end of the scheduled time, in ISO 8601 format, when the channel can't send messages.
" - } + "type": "structure", + "documentation": "Specifies the start and end time for OpenHours.
", + "members": { + "StartTime": { + "shape": "__string", + "documentation": "The start of the scheduled time, in ISO 8601 format, when the channel can send messages.
" + }, + "EndTime": { + "shape": "__string", + "documentation": "The end of the scheduled time, in ISO 8601 format, when the channel can't send messages.
" + } } }, "ListOfOpenHoursRules": { - "type": "list", - "member": { - "shape": "OpenHoursRule", - "documentation": "Open Hour Rule Details.
" - } + "type": "list", + "member": { + "shape": "OpenHoursRule", + "documentation": "Open Hour Rule Details.
" + } }, "MapOfListOfOpenHoursRules": { - "type": "map", - "key": { - "shape": "DayOfWeek", - "documentation": "Day of a week when the rule will be applied. Valid values are [MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY]
" - }, - "value": { - "shape": "ListOfOpenHoursRules", - "documentation": "Open Hour Rules.
" - } + "type": "map", + "key": { + "shape": "DayOfWeek", + "documentation": "Day of a week when the rule will be applied. Valid values are [MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY]
" + }, + "value": { + "shape": "ListOfOpenHoursRules", + "documentation": "Open Hour Rules.
" + } }, "OpenHours": { - "type": "structure", - "documentation": "Specifies the times when message are allowed to be sent to endpoints.
", - "members": { - "EMAIL": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "Specifies the schedule settings for the email channel.
" - }, - "SMS": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "Specifies the schedule settings for the SMS channel.
" - }, - "PUSH": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "Specifies the schedule settings for the push channel.
" - }, - "VOICE": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "Specifies the schedule settings for the voice channel.
" - }, - "CUSTOM": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "Specifies the schedule settings for the custom channel.
" - } + "type": "structure", + "documentation": "Specifies the times when message are allowed to be sent to endpoints.
", + "members": { + "EMAIL": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "Specifies the schedule settings for the email channel.
" + }, + "SMS": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "Specifies the schedule settings for the SMS channel.
" + }, + "PUSH": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "Specifies the schedule settings for the push channel.
" + }, + "VOICE": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "Specifies the schedule settings for the voice channel.
" + }, + "CUSTOM": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "Specifies the schedule settings for the custom channel.
" + } } }, "ClosedDaysRule": { - "type": "structure", - "documentation": "Specifies the rule settings for when messages can't be sent.
", - "members": { - "Name": { - "shape": "__string", - "documentation": "The name of the closed day rule.
" - }, - "StartDateTime": { - "shape": "__string", - "documentation": "Start DateTime ISO 8601 format
" - }, - "EndDateTime": { - "shape": "__string", - "documentation": "End DateTime ISO 8601 format
" - } + "type": "structure", + "documentation": "Specifies the rule settings for when messages can't be sent.
", + "members": { + "Name": { + "shape": "__string", + "documentation": "The name of the closed day rule.
" + }, + "StartDateTime": { + "shape": "__string", + "documentation": "Start DateTime ISO 8601 format
" + }, + "EndDateTime": { + "shape": "__string", + "documentation": "End DateTime ISO 8601 format
" + } } }, "ListOfClosedDaysRules": { - "type": "list", - "member": { - "shape": "ClosedDaysRule", - "documentation": "ClosedDays rule details.
" - } + "type": "list", + "member": { + "shape": "ClosedDaysRule", + "documentation": "ClosedDays rule details.
" + } }, "ClosedDays": { - "type": "structure", - "documentation": "The time when a journey will not send messages. QuietTime should be configured first and SendingSchedule should be set to true.
", - "members": { - "EMAIL": { - "shape": "ListOfClosedDaysRules", - "documentation": "Rules for the Email channel.
" - }, - "SMS": { - "shape": "ListOfClosedDaysRules", - "documentation": "Rules for the SMS channel.
" - }, - "PUSH": { - "shape": "ListOfClosedDaysRules", - "documentation": "Rules for the Push channel.
" - }, - "VOICE": { - "shape": "ListOfClosedDaysRules", - "documentation": "Rules for the Voice channel.
" - }, - "CUSTOM": { - "shape": "ListOfClosedDaysRules", - "documentation": "Rules for the Custom channel.
" - } + "type": "structure", + "documentation": "The time when a journey will not send messages. QuietTime should be configured first and SendingSchedule should be set to true.
", + "members": { + "EMAIL": { + "shape": "ListOfClosedDaysRules", + "documentation": "Rules for the Email channel.
" + }, + "SMS": { + "shape": "ListOfClosedDaysRules", + "documentation": "Rules for the SMS channel.
" + }, + "PUSH": { + "shape": "ListOfClosedDaysRules", + "documentation": "Rules for the Push channel.
" + }, + "VOICE": { + "shape": "ListOfClosedDaysRules", + "documentation": "Rules for the Voice channel.
" + }, + "CUSTOM": { + "shape": "ListOfClosedDaysRules", + "documentation": "Rules for the Custom channel.
" + } } } } From 89ef7a2641a2aa653c87bb7be9dcc1c52a0770b6 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 28 Jul 2023 18:10:01 +0000 Subject: [PATCH 061/270] Amazon Connect Service Update: This release adds support for new number types. --- .../feature-AmazonConnectService-4cad0d6.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 8 ++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 .changes/next-release/feature-AmazonConnectService-4cad0d6.json diff --git a/.changes/next-release/feature-AmazonConnectService-4cad0d6.json b/.changes/next-release/feature-AmazonConnectService-4cad0d6.json new file mode 100644 index 000000000000..3a78a51a32f5 --- /dev/null +++ b/.changes/next-release/feature-AmazonConnectService-4cad0d6.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds support for new number types." +} diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index 99d6c7c7e897..2dfb0061ea07 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -11937,13 +11937,17 @@ "type":"string", "enum":[ "TOLL_FREE", - "DID" + "DID", + "UIFN", + "SHARED", + "THIRD_PARTY_TF", + "THIRD_PARTY_DID" ] }, "PhoneNumberTypes":{ "type":"list", "member":{"shape":"PhoneNumberType"}, - "max":2 + "max":6 }, "PhoneNumberWorkflowMessage":{ "type":"string", From abccbf70c8bd4def516d0409df1ec6413ba75b7d Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 28 Jul 2023 18:10:05 +0000 Subject: [PATCH 062/270] Amazon CloudWatch Application Insights Update: This release enable customer to add/remove/update more than one workload for a component --- ...CloudWatchApplicationInsights-923bb92.json | 6 + .../codegen-resources/endpoint-rule-set.json | 399 +++--- 
.../codegen-resources/endpoint-tests.json | 1187 +++-------------- .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 501 ++++++- 5 files changed, 949 insertions(+), 1149 deletions(-) create mode 100644 .changes/next-release/feature-AmazonCloudWatchApplicationInsights-923bb92.json diff --git a/.changes/next-release/feature-AmazonCloudWatchApplicationInsights-923bb92.json b/.changes/next-release/feature-AmazonCloudWatchApplicationInsights-923bb92.json new file mode 100644 index 000000000000..fea2af0a605f --- /dev/null +++ b/.changes/next-release/feature-AmazonCloudWatchApplicationInsights-923bb92.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon CloudWatch Application Insights", + "contributor": "", + "description": "This release enable customer to add/remove/update more than one workload for a component" +} diff --git a/services/applicationinsights/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/applicationinsights/src/main/resources/codegen-resources/endpoint-rule-set.json index 75e13ac5dd1c..5aa99ae02e8b 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/applicationinsights/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] 
} ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://applicationinsights-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://applicationinsights-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://applicationinsights-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": 
[ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://applicationinsights.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://applicationinsights-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://applicationinsights.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://applicationinsights.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://applicationinsights.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git 
a/services/applicationinsights/src/main/resources/codegen-resources/endpoint-tests.json b/services/applicationinsights/src/main/resources/codegen-resources/endpoint-tests.json index 7f3fbce2a03d..af6b8c47b29d 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/applicationinsights/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,770 +1,29 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://applicationinsights-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { 
- "endpoint": { - "url": "https://applicationinsights-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and 
DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://applicationinsights.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": 
{ - "url": "https://applicationinsights-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://applicationinsights-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://applicationinsights.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled 
and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-1.amazonaws.com" + "url": "https://applicationinsights.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-northeast-1.api.aws" + "url": "https://applicationinsights.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -775,542 +34,534 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-northeast-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For 
region me-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.me-south-1.api.aws" + "url": "https://applicationinsights.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.me-south-1.amazonaws.com" + "url": "https://applicationinsights.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.me-south-1.api.aws" + "url": "https://applicationinsights.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.me-south-1.amazonaws.com" + "url": "https://applicationinsights.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region 
sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.sa-east-1.api.aws" + "url": "https://applicationinsights.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.sa-east-1.amazonaws.com" + "url": "https://applicationinsights.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.sa-east-1.api.aws" + "url": "https://applicationinsights.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.sa-east-1.amazonaws.com" + "url": "https://applicationinsights.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and 
DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-east-1.api.aws" + "url": "https://applicationinsights.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-east-1.amazonaws.com" + "url": "https://applicationinsights.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-east-1.api.aws" + "url": "https://applicationinsights.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-east-1.amazonaws.com" + "url": "https://applicationinsights.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with 
FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://applicationinsights.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.cn-north-1.amazonaws.com.cn" + "url": "https://applicationinsights.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://applicationinsights.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.cn-north-1.amazonaws.com.cn" + "url": "https://applicationinsights.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-gov-west-1.api.aws" + "url": "https://applicationinsights.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-gov-west-1.amazonaws.com" + "url": "https://applicationinsights.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights.us-gov-west-1.api.aws" + "url": "https://applicationinsights-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.us-gov-west-1.amazonaws.com" + "url": "https://applicationinsights-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack 
enabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-southeast-1.api.aws" + "url": "https://applicationinsights.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-southeast-1.amazonaws.com" + "url": "https://applicationinsights.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-southeast-1.api.aws" + "url": "https://applicationinsights.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-southeast-1.amazonaws.com" + "url": "https://applicationinsights-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS 
enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-southeast-2.api.aws" + "url": "https://applicationinsights-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-southeast-2.amazonaws.com" + "url": "https://applicationinsights.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-southeast-2.api.aws" + "url": "https://applicationinsights.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-southeast-2.amazonaws.com" + "url": "https://applicationinsights.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": 
"For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-east-1.api.aws" + "url": "https://applicationinsights-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-east-1.amazonaws.com" + "url": "https://applicationinsights-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights.us-east-1.api.aws" + "url": "https://applicationinsights.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://applicationinsights.us-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For 
region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-east-2.api.aws" + "url": "https://applicationinsights-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.us-east-2.api.aws" + "url": "https://applicationinsights.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://applicationinsights.us-east-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + 
"documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://applicationinsights-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://applicationinsights.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - 
"documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1320,9 +571,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1332,11 +583,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/applicationinsights/src/main/resources/codegen-resources/paginators-1.json b/services/applicationinsights/src/main/resources/codegen-resources/paginators-1.json index 2f237e4c9580..c9e1dab5611a 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/paginators-1.json +++ b/services/applicationinsights/src/main/resources/codegen-resources/paginators-1.json @@ -29,6 +29,11 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" + }, + "ListWorkloads": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/services/applicationinsights/src/main/resources/codegen-resources/service-2.json b/services/applicationinsights/src/main/resources/codegen-resources/service-2.json index 00880d07607d..7d1cbec399d8 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/service-2.json +++ 
b/services/applicationinsights/src/main/resources/codegen-resources/service-2.json @@ -14,6 +14,22 @@ "uid":"application-insights-2018-11-25" }, "operations":{ + "AddWorkload":{ + "name":"AddWorkload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddWorkloadRequest"}, + "output":{"shape":"AddWorkloadResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Adds a workload to a component. Each component can have at most five workloads.
" + }, "CreateApplication":{ "name":"CreateApplication", "http":{ @@ -231,6 +247,21 @@ ], "documentation":"Describes the anomalies or errors associated with the problem.
" }, + "DescribeWorkload":{ + "name":"DescribeWorkload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkloadRequest"}, + "output":{"shape":"DescribeWorkloadResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Describes a workload and its configuration.
" + }, "ListApplications":{ "name":"ListApplications", "http":{ @@ -334,6 +365,36 @@ ], "documentation":"Retrieve a list of the tags (keys and values) that are associated with a specified application. A tag is a label that you optionally define and associate with an application. Each tag consists of a required tag key and an optional associated tag value. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.
" }, + "ListWorkloads":{ + "name":"ListWorkloads", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWorkloadsRequest"}, + "output":{"shape":"ListWorkloadsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Lists the workloads that are configured on a given component.
" + }, + "RemoveWorkload":{ + "name":"RemoveWorkload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveWorkloadRequest"}, + "output":{"shape":"RemoveWorkloadResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Remove workload from a component.
" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -424,6 +485,36 @@ {"shape":"InternalServerException"} ], "documentation":"Adds a log pattern to a LogPatternSet.
Updates the visibility of the problem or specifies the problem as RESOLVED.
Adds a workload to a component. Each component can have at most five workloads.
" } }, "shapes":{ @@ -435,6 +526,47 @@ "documentation":"User does not have permissions to perform this action.
", "exception":true }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^\\d{12}$" + }, + "AddWorkloadRequest":{ + "type":"structure", + "required":[ + "ResourceGroupName", + "ComponentName", + "WorkloadConfiguration" + ], + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"The name of the resource group.
" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"The name of the component.
" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"The configuration settings of the workload. The value is the escaped JSON of the configuration.
" + } + } + }, + "AddWorkloadResponse":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"The ID of the workload.
" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"The configuration settings of the workload. The value is the escaped JSON of the configuration.
" + } + } + }, "AffectedResource":{"type":"string"}, "AmazonResourceName":{ "type":"string", @@ -483,6 +615,10 @@ "ApplicationInfo":{ "type":"structure", "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the owner of the application.
" + }, "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"The name of the resource group used for the application.
" @@ -564,6 +700,14 @@ "ConfigurationEvent":{ "type":"structure", "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"The name of the resource group of the application to which the configuration event belongs.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the owner of the application to which the configuration event belongs.
" + }, "MonitoredResourceARN":{ "shape":"ConfigurationEventMonitoredResourceARN", "documentation":"The resource monitored by Application Insights.
" @@ -811,6 +955,10 @@ "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"The name of the resource group.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -842,6 +990,10 @@ "Tier":{ "shape":"Tier", "documentation":"The tier of the application component.
" + }, + "RecommendationType":{ + "shape":"RecommendationType", + "documentation":"The recommended configuration type.
" } } }, @@ -868,6 +1020,10 @@ "ComponentName":{ "shape":"ComponentName", "documentation":"The name of the component.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -902,6 +1058,10 @@ "ComponentName":{ "shape":"ComponentName", "documentation":"The name of the component.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -934,6 +1094,10 @@ "PatternName":{ "shape":"LogPatternName", "documentation":"The name of the log pattern.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -944,6 +1108,10 @@ "shape":"ResourceGroupName", "documentation":"The name of the resource group.
" }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" + }, "LogPattern":{ "shape":"LogPattern", "documentation":"The successfully created log pattern.
" @@ -957,6 +1125,10 @@ "ObservationId":{ "shape":"ObservationId", "documentation":"The ID of the observation.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -976,6 +1148,10 @@ "ProblemId":{ "shape":"ProblemId", "documentation":"The ID of the problem.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -995,6 +1171,10 @@ "ProblemId":{ "shape":"ProblemId", "documentation":"The ID of the problem.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the owner of the resource group affected by the problem.
" } } }, @@ -1007,6 +1187,49 @@ } } }, + "DescribeWorkloadRequest":{ + "type":"structure", + "required":[ + "ResourceGroupName", + "ComponentName", + "WorkloadId" + ], + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"The name of the resource group.
" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"The name of the component.
" + }, + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"The ID of the workload.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the workload owner.
" + } + } + }, + "DescribeWorkloadResponse":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"The ID of the workload.
" + }, + "WorkloadRemarks":{ + "shape":"Remarks", + "documentation":"If logging is supported for the resource type, shows whether the component has configured logs to be monitored.
" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"The configuration settings of the workload. The value is the escaped JSON of the configuration.
" + } + } + }, "DetectedWorkload":{ "type":"map", "key":{"shape":"Tier"}, @@ -1076,6 +1299,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"The token to request the next page of results.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -1107,6 +1334,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"The token to request the next page of results.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -1149,6 +1380,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"The NextToken value returned from a previous paginated ListConfigurationHistory request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.
The AWS account ID for the resource group owner.
" } } }, @@ -1180,6 +1415,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"The token to request the next page of results.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -1190,6 +1429,10 @@ "shape":"ResourceGroupName", "documentation":"The name of the resource group.
" }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" + }, "LogPatternSets":{ "shape":"LogPatternSetList", "documentation":"The list of log pattern sets.
" @@ -1219,6 +1462,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"The token to request the next page of results.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -1229,6 +1476,10 @@ "shape":"ResourceGroupName", "documentation":"The name of the resource group.
" }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" + }, "LogPatterns":{ "shape":"LogPatternList", "documentation":"The list of log patterns.
" @@ -1242,6 +1493,10 @@ "ListProblemsRequest":{ "type":"structure", "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" + }, "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"The name of the resource group.
" @@ -1265,6 +1520,10 @@ "ComponentName":{ "shape":"ComponentName", "documentation":"The name of the component.
" + }, + "Visibility":{ + "shape":"Visibility", + "documentation":"Specifies whether or not you can view the problem. If not specified, visible and ignored problems are returned.
" } } }, @@ -1282,6 +1541,10 @@ "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"The name of the resource group.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the resource group owner.
" } } }, @@ -1304,6 +1567,48 @@ } } }, + "ListWorkloadsRequest":{ + "type":"structure", + "required":[ + "ResourceGroupName", + "ComponentName" + ], + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"The name of the resource group.
" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"The name of the component.
" + }, + "MaxResults":{ + "shape":"MaxEntities", + "documentation":"The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.
The token to request the next page of results.
" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID of the owner of the workload.
" + } + } + }, + "ListWorkloadsResponse":{ + "type":"structure", + "members":{ + "WorkloadList":{ + "shape":"WorkloadList", + "documentation":"The list of workloads.
" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"The token to request the next page of results.
" + } + } + }, "LogFilter":{ "type":"string", "enum":[ @@ -1624,6 +1929,10 @@ "shape":"SeverityLevel", "documentation":"A measure of the level of impact of the problem.
" }, + "AccountId":{ + "shape":"AccountId", + "documentation":"The AWS account ID for the owner of the resource group affected by the problem.
" + }, "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"The name of the resource group affected by the problem.
" @@ -1639,6 +1948,14 @@ "LastRecurrenceTime":{ "shape":"LastRecurrenceTime", "documentation":"The last time that the problem reoccurred after its last resolution.
" + }, + "Visibility":{ + "shape":"Visibility", + "documentation":"Specifies whether or not you can view the problem. Updates to ignored problems do not generate notifications.
" + }, + "ResolutionMethod":{ + "shape":"ResolutionMethod", + "documentation":"Specifies how the problem was resolved. If the value is AUTOMATIC, the system resolved the problem. If the value is MANUAL, the user resolved the problem. If the value is UNRESOLVED, then the problem is not resolved.
Describes a problem that is detected by correlating observations.
" @@ -1655,6 +1972,14 @@ }, "RdsEventCategories":{"type":"string"}, "RdsEventMessage":{"type":"string"}, + "RecommendationType":{ + "type":"string", + "enum":[ + "INFRA_ONLY", + "WORKLOAD_ONLY", + "ALL" + ] + }, "RecurringCount":{"type":"long"}, "RelatedObservations":{ "type":"structure", @@ -1668,6 +1993,41 @@ }, "Remarks":{"type":"string"}, "RemoveSNSTopic":{"type":"boolean"}, + "RemoveWorkloadRequest":{ + "type":"structure", + "required":[ + "ResourceGroupName", + "ComponentName", + "WorkloadId" + ], + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"The name of the resource group.
" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"The name of the component.
" + }, + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"The ID of the workload.
" + } + } + }, + "RemoveWorkloadResponse":{ + "type":"structure", + "members":{ + } + }, + "ResolutionMethod":{ + "type":"string", + "enum":[ + "MANUAL", + "AUTOMATIC", + "UNRESOLVED" + ] + }, "ResourceARN":{ "type":"string", "max":1011, @@ -1729,7 +2089,8 @@ "IGNORE", "RESOLVED", "PENDING", - "RECURRING" + "RECURRING", + "RECOVERING" ] }, "Tag":{ @@ -1824,7 +2185,10 @@ "SAP_HANA_HIGH_AVAILABILITY", "SQL_SERVER_FAILOVER_CLUSTER_INSTANCE", "SHAREPOINT", - "ACTIVE_DIRECTORY" + "ACTIVE_DIRECTORY", + "SAP_NETWEAVER_STANDARD", + "SAP_NETWEAVER_DISTRIBUTED", + "SAP_NETWEAVER_HIGH_AVAILABILITY" ], "max":50, "min":1 @@ -2015,6 +2379,72 @@ } } }, + "UpdateProblemRequest":{ + "type":"structure", + "required":["ProblemId"], + "members":{ + "ProblemId":{ + "shape":"ProblemId", + "documentation":"The ID of the problem.
" + }, + "UpdateStatus":{ + "shape":"UpdateStatus", + "documentation":"The status of the problem. Arguments can be passed for only problems that show a status of RECOVERING.
The visibility of a problem. When you pass a value of IGNORED, the problem is removed from the default view, and all notifications for the problem are suspended. When VISIBLE is passed, the IGNORED action is reversed.
The name of the resource group.
" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"The name of the component.
" + }, + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"The ID of the workload.
" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"The configuration settings of the workload. The value is the escaped JSON of the configuration.
" + } + } + }, + "UpdateWorkloadResponse":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"The ID of the workload.
" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"The configuration settings of the workload. The value is the escaped JSON of the configuration.
" + } + } + }, "ValidationException":{ "type":"structure", "members":{ @@ -2024,11 +2454,78 @@ "exception":true }, "Value":{"type":"double"}, + "Visibility":{ + "type":"string", + "enum":[ + "IGNORED", + "VISIBLE" + ] + }, + "Workload":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"The ID of the workload.
" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"The name of the component.
" + }, + "WorkloadName":{ + "shape":"WorkloadName", + "documentation":"The name of the workload.
" + }, + "Tier":{ + "shape":"Tier", + "documentation":"The tier of the workload.
" + }, + "WorkloadRemarks":{ + "shape":"Remarks", + "documentation":"If logging is supported for the resource type, shows whether the component has configured logs to be monitored.
" + } + }, + "documentation":"Describes the workloads on a component.
" + }, + "WorkloadConfiguration":{ + "type":"structure", + "members":{ + "WorkloadName":{ + "shape":"WorkloadName", + "documentation":"The name of the workload.
" + }, + "Tier":{ + "shape":"Tier", + "documentation":"The configuration of the workload tier.
" + }, + "Configuration":{ + "shape":"ComponentConfiguration", + "documentation":"The configuration settings of the workload.
" + } + }, + "documentation":"The configuration of the workload.
" + }, + "WorkloadId":{ + "type":"string", + "max":38, + "min":38, + "pattern":"w-[0-9a-fA-F]{8}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{12}" + }, + "WorkloadList":{ + "type":"list", + "member":{"shape":"Workload"} + }, "WorkloadMetaData":{ "type":"map", "key":{"shape":"MetaDataKey"}, "value":{"shape":"MetaDataValue"} }, + "WorkloadName":{ + "type":"string", + "max":8, + "min":1, + "pattern":"[a-zA-Z0-9\\.\\-_]*" + }, "XRayErrorPercent":{"type":"integer"}, "XRayFaultPercent":{"type":"integer"}, "XRayNodeName":{"type":"string"}, From 453e13ed0fcfc3a620ee3a27d4984f74cd62c756 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 28 Jul 2023 18:10:07 +0000 Subject: [PATCH 063/270] AWS CloudFormation Update: This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet. --- .../feature-AWSCloudFormation-91c2d33.json | 6 + .../codegen-resources/endpoint-rule-set.json | 362 ++++++++---------- .../codegen-resources/endpoint-tests.json | 206 +++++----- .../codegen-resources/service-2.json | 26 +- 4 files changed, 297 insertions(+), 303 deletions(-) create mode 100644 .changes/next-release/feature-AWSCloudFormation-91c2d33.json diff --git a/.changes/next-release/feature-AWSCloudFormation-91c2d33.json b/.changes/next-release/feature-AWSCloudFormation-91c2d33.json new file mode 100644 index 000000000000..a583e0119d7c --- /dev/null +++ b/.changes/next-release/feature-AWSCloudFormation-91c2d33.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet." 
+} diff --git a/services/cloudformation/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudformation/src/main/resources/codegen-resources/endpoint-rule-set.json index 9cbca10387c8..226c1811ddf1 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudformation/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] 
}, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudformation-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://cloudformation-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": 
"stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://cloudformation.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://cloudformation-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://cloudformation.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://cloudformation-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudformation.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": 
[], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://cloudformation.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cloudformation.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://cloudformation.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cloudformation/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudformation/src/main/resources/codegen-resources/endpoint-tests.json index dcd0159a3812..b20e6d8f1e7a 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudformation/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "af-south-1", "UseFIPS": false, - "Region": "af-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": 
"ap-northeast-3" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-north-1", "UseFIPS": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-1", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-3", "UseFIPS": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { @@ -216,9 +216,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "me-south-1", "UseFIPS": false, - "Region": "me-south-1" + "UseDualStack": 
false } }, { @@ -229,9 +229,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { @@ -242,9 +242,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -268,9 +268,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -281,9 +281,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": true, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -294,9 +294,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -307,9 +307,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": true, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -320,9 +320,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -333,9 +333,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": true, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -346,9 +346,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -359,9 +359,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -372,9 +372,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -385,9 +385,9 @@ } }, "params": { - "UseDualStack": false, + "Region": 
"cn-northwest-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": false } }, { @@ -398,9 +398,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -411,9 +411,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -424,9 +424,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -437,9 +437,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -450,9 +450,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -463,9 +463,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -476,9 +476,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -489,9 +489,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -502,9 +502,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -515,9 +515,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -528,9 +528,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-west-1", "UseFIPS": false, - "Region": "us-iso-west-1" + "UseDualStack": false } }, { @@ -539,9 +539,9 @@ "error": "FIPS and DualStack are enabled, but this partition does 
not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -552,9 +552,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -563,9 +563,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -576,9 +576,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -587,9 +587,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -600,9 +600,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -611,9 +611,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -624,9 +624,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -638,8 +638,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -649,9 +649,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": 
"https://example.com" } }, @@ -661,9 +661,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index 1fc35f8e0e71..7fe1b649322d 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -1353,7 +1353,10 @@ "CFNRegistryException":{ "type":"structure", "members":{ - "Message":{"shape":"ErrorMessage"} + "Message":{ + "shape":"ErrorMessage", + "documentation":"An message with details about the error that occurred.
" + } }, "documentation":"An error occurred during a CloudFormation registry operation.
", "error":{ @@ -1856,6 +1859,10 @@ "EnableTerminationProtection":{ "shape":"EnableTerminationProtection", "documentation":"Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default.
For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.
" + }, + "RetainExceptOnCreate":{ + "shape":"RetainExceptOnCreate", + "documentation":"This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.
The input for CreateStack action.
" @@ -3070,6 +3077,10 @@ "DisableRollback":{ "shape":"DisableRollback", "documentation":"Preserves the state of previously provisioned resources when an operation fails. This parameter can't be specified when the OnStackFailure parameter to the CreateChangeSet API operation was specified.
True - if the stack creation fails, do nothing. This is equivalent to specifying DO_NOTHING for the OnStackFailure parameter to the CreateChangeSet API operation.
False - if the stack creation fails, roll back the stack. This is equivalent to specifying ROLLBACK for the OnStackFailure parameter to the CreateChangeSet API operation.
Default: True
This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.
The input for the ExecuteChangeSet action.
" @@ -4863,6 +4874,7 @@ "type":"list", "member":{"shape":"ResourceToSkip"} }, + "RetainExceptOnCreate":{"type":"boolean"}, "RetainResources":{ "type":"list", "member":{"shape":"LogicalResourceId"} @@ -4910,6 +4922,10 @@ "ClientRequestToken":{ "shape":"ClientRequestToken", "documentation":"A unique identifier for this RollbackStack request.
This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.
Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.
" + }, + "RetainExceptOnCreate":{ + "shape":"RetainExceptOnCreate", + "documentation":"This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.
The Stack data type.
" @@ -6941,6 +6961,10 @@ "ClientRequestToken":{ "shape":"ClientRequestToken", "documentation":"A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that CloudFormation successfully received them.
All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.
In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.
This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.
The input for an UpdateStack action.
" From ab9388fc7e15993761e3b0d92cc14de998aa8cfe Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 28 Jul 2023 18:11:16 +0000 Subject: [PATCH 064/270] Updated endpoints.json and partitions.json. --- .../feature-AWSSDKforJavav2-0443982.json | 6 ++ .../regions/internal/region/endpoints.json | 78 ++----------------- 2 files changed, 14 insertions(+), 70 deletions(-) create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json new file mode 100644 index 000000000000..e5b5ee3ca5e3 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." +} diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 186f785124e8..2f81ba3db987 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -4795,16 +4795,21 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -7305,9 +7310,11 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : 
{ }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -14855,41 +14862,6 @@ }, "sms" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "fips-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, - "hostname" : "sms-fips.us-east-1.amazonaws.com" - }, - "fips-us-east-2" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "deprecated" : true, - "hostname" : "sms-fips.us-east-2.amazonaws.com" - }, - "fips-us-west-1" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "deprecated" : true, - "hostname" : "sms-fips.us-west-1.amazonaws.com" - }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -14897,26 +14869,6 @@ "deprecated" : true, "hostname" : "sms-fips.us-west-2.amazonaws.com" }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { - "variants" : [ { - "hostname" : "sms-fips.us-east-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-east-2" : { - "variants" : [ { - "hostname" : "sms-fips.us-east-2.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-west-1" : { - "variants" : [ { - "hostname" : "sms-fips.us-west-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, "us-west-2" : { "variants" : [ { "hostname" : "sms-fips.us-west-2.amazonaws.com", @@ -19040,8 +18992,7 @@ }, "sms" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { } } }, "snowball" : { @@ -22977,13 +22928,6 @@ }, "sms" : { "endpoints" : { - "fips-us-gov-east-1" : { - "credentialScope" : { - "region" : "us-gov-east-1" - }, - "deprecated" : true, - "hostname" : "sms-fips.us-gov-east-1.amazonaws.com" - }, "fips-us-gov-west-1" : { "credentialScope" : { "region" : 
"us-gov-west-1" @@ -22991,12 +22935,6 @@ "deprecated" : true, "hostname" : "sms-fips.us-gov-west-1.amazonaws.com" }, - "us-gov-east-1" : { - "variants" : [ { - "hostname" : "sms-fips.us-gov-east-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, "us-gov-west-1" : { "variants" : [ { "hostname" : "sms-fips.us-gov-west-1.amazonaws.com", From 465eab46de63baaef093316d92fa1f6ce850aadd Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 28 Jul 2023 18:12:22 +0000 Subject: [PATCH 065/270] Release 2.20.115. Updated CHANGELOG.md, README.md and all pom.xml. --- .changes/2.20.115.json | 48 +++++++++++++++++++ .../feature-AWSCloudFormation-91c2d33.json | 6 --- .../feature-AWSSDKforJavav2-0443982.json | 6 --- .../feature-AmazonCloudFront-8a88fc0.json | 6 --- ...CloudWatchApplicationInsights-923bb92.json | 6 --- .../feature-AmazonConnectService-4cad0d6.json | 6 --- .../feature-AmazonPinpoint-42546f9.json | 6 --- ...ture-ManagedStreamingforKafka-f746510.json | 6 --- CHANGELOG.md | 31 +++++++++++- README.md | 8 ++-- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 2 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 
2 +- core/regions/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- 
services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- 
services/datasync/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 +- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- 
services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- 
services/macie/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- 
services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 2 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- 
services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- 
test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- 426 files changed, 498 insertions(+), 463 deletions(-) create mode 100644 .changes/2.20.115.json delete mode 100644 .changes/next-release/feature-AWSCloudFormation-91c2d33.json delete mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json delete mode 100644 .changes/next-release/feature-AmazonCloudFront-8a88fc0.json delete mode 100644 .changes/next-release/feature-AmazonCloudWatchApplicationInsights-923bb92.json delete mode 100644 .changes/next-release/feature-AmazonConnectService-4cad0d6.json delete mode 100644 .changes/next-release/feature-AmazonPinpoint-42546f9.json delete mode 100644 .changes/next-release/feature-ManagedStreamingforKafka-f746510.json diff --git a/.changes/2.20.115.json b/.changes/2.20.115.json new file mode 100644 index 000000000000..5b4f41b6d87e --- /dev/null +++ b/.changes/2.20.115.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.115", + "date": "2023-07-28", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "contributor": "", + "description": "Add a new JavaScript runtime version for CloudFront Functions." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Application Insights", + "contributor": "", + "description": "This release enable customer to add/remove/update more than one workload for a component" + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds support for new number types." 
+ }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "contributor": "", + "description": "Added support for sending push notifications using the FCM v1 API with json credentials. Amazon Pinpoint customers can now deliver messages to Android devices using both FCM v1 API and the legacy FCM/GCM API" + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "contributor": "", + "description": "Amazon MSK has introduced new versions of ListClusterOperations and DescribeClusterOperation APIs. These v2 APIs provide information and insights into the ongoing operations of both MSK Provisioned and MSK Serverless clusters." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AWSCloudFormation-91c2d33.json b/.changes/next-release/feature-AWSCloudFormation-91c2d33.json deleted file mode 100644 index a583e0119d7c..000000000000 --- a/.changes/next-release/feature-AWSCloudFormation-91c2d33.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS CloudFormation", - "contributor": "", - "description": "This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet." -} diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json deleted file mode 100644 index e5b5ee3ca5e3..000000000000 --- a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS SDK for Java v2", - "contributor": "", - "description": "Updated endpoint and partition metadata." 
-} diff --git a/.changes/next-release/feature-AmazonCloudFront-8a88fc0.json b/.changes/next-release/feature-AmazonCloudFront-8a88fc0.json deleted file mode 100644 index 8fe2453c9ea0..000000000000 --- a/.changes/next-release/feature-AmazonCloudFront-8a88fc0.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon CloudFront", - "contributor": "", - "description": "Add a new JavaScript runtime version for CloudFront Functions." -} diff --git a/.changes/next-release/feature-AmazonCloudWatchApplicationInsights-923bb92.json b/.changes/next-release/feature-AmazonCloudWatchApplicationInsights-923bb92.json deleted file mode 100644 index fea2af0a605f..000000000000 --- a/.changes/next-release/feature-AmazonCloudWatchApplicationInsights-923bb92.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon CloudWatch Application Insights", - "contributor": "", - "description": "This release enable customer to add/remove/update more than one workload for a component" -} diff --git a/.changes/next-release/feature-AmazonConnectService-4cad0d6.json b/.changes/next-release/feature-AmazonConnectService-4cad0d6.json deleted file mode 100644 index 3a78a51a32f5..000000000000 --- a/.changes/next-release/feature-AmazonConnectService-4cad0d6.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Connect Service", - "contributor": "", - "description": "This release adds support for new number types." -} diff --git a/.changes/next-release/feature-AmazonPinpoint-42546f9.json b/.changes/next-release/feature-AmazonPinpoint-42546f9.json deleted file mode 100644 index aa0b280ae226..000000000000 --- a/.changes/next-release/feature-AmazonPinpoint-42546f9.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Pinpoint", - "contributor": "", - "description": "Added support for sending push notifications using the FCM v1 API with json credentials. 
Amazon Pinpoint customers can now deliver messages to Android devices using both FCM v1 API and the legacy FCM/GCM API" -} diff --git a/.changes/next-release/feature-ManagedStreamingforKafka-f746510.json b/.changes/next-release/feature-ManagedStreamingforKafka-f746510.json deleted file mode 100644 index 93cecc120d62..000000000000 --- a/.changes/next-release/feature-ManagedStreamingforKafka-f746510.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Managed Streaming for Kafka", - "contributor": "", - "description": "Amazon MSK has introduced new versions of ListClusterOperations and DescribeClusterOperation APIs. These v2 APIs provide information and insights into the ongoing operations of both MSK Provisioned and MSK Serverless clusters." -} diff --git a/CHANGELOG.md b/CHANGELOG.md index 040402f8ab15..cccdf237d975 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,32 @@ +# __2.20.115__ __2023-07-28__ +## __AWS CloudFormation__ + - ### Features + - This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon CloudFront__ + - ### Features + - Add a new JavaScript runtime version for CloudFront Functions. + +## __Amazon CloudWatch Application Insights__ + - ### Features + - This release enable customer to add/remove/update more than one workload for a component + +## __Amazon Connect Service__ + - ### Features + - This release adds support for new number types. + +## __Amazon Pinpoint__ + - ### Features + - Added support for sending push notifications using the FCM v1 API with json credentials. 
Amazon Pinpoint customers can now deliver messages to Android devices using both FCM v1 API and the legacy FCM/GCM API + +## __Managed Streaming for Kafka__ + - ### Features + - Amazon MSK has introduced new versions of ListClusterOperations and DescribeClusterOperation APIs. These v2 APIs provide information and insights into the ongoing operations of both MSK Provisioned and MSK Serverless clusters. + # __2.20.114__ __2023-07-27__ ## __Amazon Simple Queue Service__ - ### Features @@ -862,7 +891,7 @@ Special thanks to the following contributors to this release: ## __Contributors__ Special thanks to the following contributors to this release: -[@breader124](https://github.com/breader124), [@bmaizels](https://github.com/bmaizels) +[@bmaizels](https://github.com/bmaizels), [@breader124](https://github.com/breader124) # __2.20.85__ __2023-06-13__ ## __AWS CloudTrail__ - ### Features diff --git a/README.md b/README.md index 0fe67dac38e8..e4639297ecd1 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same verCreates a new form for an Amplify.
", + "documentation":"Creates a new form for an Amplify app.
", "idempotent":true }, "CreateTheme":{ @@ -356,7 +356,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ThrottlingException"} ], - "documentation":"Starts a code generation job for for a specified Amplify app and backend environment.
" + "documentation":"Starts a code generation job for a specified Amplify app and backend environment.
" }, "UpdateComponent":{ "name":"UpdateComponent", @@ -453,6 +453,25 @@ }, "documentation":"Represents the event action configuration for an element of a Component or ComponentChild. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components. ActionParameters defines the action that is performed when an event occurs on the component.
The configuration for an application using GraphQL APIs.
" + }, + "dataStoreConfig":{ + "shape":"DataStoreRenderConfig", + "documentation":"The configuration for an application using DataStore APIs.
" + }, + "noApiConfig":{ + "shape":"NoApiRenderConfig", + "documentation":"The configuration for an application with no API being used.
" + } + }, + "documentation":"Describes the API configuration for a code generation job.
", + "union":true + }, "AppId":{ "type":"string", "max":20, @@ -763,7 +782,7 @@ "documentation":"The name of the ReactStartCodegenJobData object.
Describes the configuration information for rendering the UI component associated the code generation job.
", + "documentation":"Describes the configuration information for rendering the UI component associated with the code generation job.
", "union":true }, "CodegenJobStatus":{ @@ -1533,6 +1552,12 @@ }, "payload":"entity" }, + "DataStoreRenderConfig":{ + "type":"structure", + "members":{ + }, + "documentation":"Describes the DataStore configuration for an API for a code generation job.
" + }, "DeleteComponentRequest":{ "type":"structure", "required":[ @@ -2509,6 +2534,39 @@ }, "payload":"theme" }, + "GraphQLRenderConfig":{ + "type":"structure", + "required":[ + "typesFilePath", + "queriesFilePath", + "mutationsFilePath", + "subscriptionsFilePath", + "fragmentsFilePath" + ], + "members":{ + "typesFilePath":{ + "shape":"String", + "documentation":"The path to the GraphQL types file, relative to the component output directory.
" + }, + "queriesFilePath":{ + "shape":"String", + "documentation":"The path to the GraphQL queries file, relative to the component output directory.
" + }, + "mutationsFilePath":{ + "shape":"String", + "documentation":"The path to the GraphQL mutations file, relative to the component output directory.
" + }, + "subscriptionsFilePath":{ + "shape":"String", + "documentation":"The path to the GraphQL subscriptions file, relative to the component output directory.
" + }, + "fragmentsFilePath":{ + "shape":"String", + "documentation":"The path to the GraphQL fragments file, relative to the component output directory.
" + } + }, + "documentation":"Describes the GraphQL configuration for an API for a code generation job.
" + }, "IdentifierList":{ "type":"list", "member":{"shape":"String"} @@ -2801,6 +2859,12 @@ }, "documentation":"Represents the state configuration when an action modifies a property of another element within the same component.
" }, + "NoApiRenderConfig":{ + "type":"structure", + "members":{ + }, + "documentation":"Describes the configuration for an application with no API being used.
" + }, "NumValues":{ "type":"list", "member":{"shape":"Integer"} @@ -2910,6 +2974,10 @@ "inlineSourceMap":{ "shape":"Boolean", "documentation":"Specifies whether the code generation job should render inline source maps.
" + }, + "apiConfiguration":{ + "shape":"ApiConfiguration", + "documentation":"The API configuration for the code generation job.
" } }, "documentation":"Describes the code generation job configuration for a React project.
" From e6a6630ac49a6eb17068f9d35bf81dc99bc868d5 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 31 Jul 2023 18:10:17 +0000 Subject: [PATCH 068/270] Elastic Disaster Recovery Service Update: Add support for in-aws right sizing --- .../feature-ElasticDisasterRecoveryService-491d599.json | 6 ++++++ .../drs/src/main/resources/codegen-resources/service-2.json | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 .changes/next-release/feature-ElasticDisasterRecoveryService-491d599.json diff --git a/.changes/next-release/feature-ElasticDisasterRecoveryService-491d599.json b/.changes/next-release/feature-ElasticDisasterRecoveryService-491d599.json new file mode 100644 index 000000000000..aaa592a65005 --- /dev/null +++ b/.changes/next-release/feature-ElasticDisasterRecoveryService-491d599.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Elastic Disaster Recovery Service", + "contributor": "", + "description": "Add support for in-aws right sizing" +} diff --git a/services/drs/src/main/resources/codegen-resources/service-2.json b/services/drs/src/main/resources/codegen-resources/service-2.json index db8d44dd9fe7..8da10f76c5cd 100644 --- a/services/drs/src/main/resources/codegen-resources/service-2.json +++ b/services/drs/src/main/resources/codegen-resources/service-2.json @@ -4050,7 +4050,8 @@ "type":"string", "enum":[ "NONE", - "BASIC" + "BASIC", + "IN_AWS" ] }, "TerminateRecoveryInstancesRequest":{ From 108e9c625f730378bc29eb523d2dbe983296b1be Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 31 Jul 2023 18:10:18 +0000 Subject: [PATCH 069/270] AWS CodeStar connections Update: New integration with the Gitlab provider type. 
--- ...eature-AWSCodeStarconnections-9a28fc4.json | 6 + .../codegen-resources/endpoint-rule-set.json | 399 +++++----- .../codegen-resources/endpoint-tests.json | 714 +++++------------- .../codegen-resources/service-2.json | 26 +- 4 files changed, 412 insertions(+), 733 deletions(-) create mode 100644 .changes/next-release/feature-AWSCodeStarconnections-9a28fc4.json diff --git a/.changes/next-release/feature-AWSCodeStarconnections-9a28fc4.json b/.changes/next-release/feature-AWSCodeStarconnections-9a28fc4.json new file mode 100644 index 000000000000..8bcbbfe1b629 --- /dev/null +++ b/.changes/next-release/feature-AWSCodeStarconnections-9a28fc4.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS CodeStar connections", + "contributor": "", + "description": "New integration with the Gitlab provider type." +} diff --git a/services/codestarconnections/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/codestarconnections/src/main/resources/codegen-resources/endpoint-rule-set.json index 698d9be7ea10..546831826172 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/codestarconnections/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: 
FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://codestar-connections-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codestar-connections-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://codestar-connections-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + 
"argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codestar-connections.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://codestar-connections-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://codestar-connections.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://codestar-connections.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://codestar-connections.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff 
--git a/services/codestarconnections/src/main/resources/codegen-resources/endpoint-tests.json b/services/codestarconnections/src/main/resources/codegen-resources/endpoint-tests.json index 5c75a579525e..c384373745c0 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/codestarconnections/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,29 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-south-1.amazonaws.com" + "url": "https://codestar-connections.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-south-1.api.aws" + "url": "https://codestar-connections.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -47,48 +34,35 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://codestar-connections-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ca-central-1.amazonaws.com" + "url": "https://codestar-connections.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ca-central-1.api.aws" + "url": "https://codestar-connections.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -99,48 +73,9 @@ } }, "params": { - "UseDualStack": false, "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack 
enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -151,152 +86,9 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": 
false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -307,100 +99,22 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - 
"expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.eu-west-2.api.aws" + "url": "https://codestar-connections.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -411,438 +125,352 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region 
eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.eu-west-1.api.aws" + "url": "https://codestar-connections.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.eu-west-1.amazonaws.com" + "url": "https://codestar-connections.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-northeast-2.api.aws" + "url": "https://codestar-connections.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": 
false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-northeast-2.amazonaws.com" + "url": "https://codestar-connections.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-northeast-2.api.aws" + "url": "https://codestar-connections.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-northeast-2.amazonaws.com" + "url": "https://codestar-connections.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-northeast-1.api.aws" + "url": "https://codestar-connections-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + 
"UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-northeast-1.amazonaws.com" + "url": "https://codestar-connections-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-northeast-1.api.aws" + "url": "https://codestar-connections.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-northeast-1.amazonaws.com" + "url": "https://codestar-connections-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.sa-east-1.api.aws" + "url": "https://codestar-connections-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, 
+ "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.sa-east-1.amazonaws.com" + "url": "https://codestar-connections.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.sa-east-1.api.aws" + "url": "https://codestar-connections.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections.sa-east-1.amazonaws.com" + "url": "https://codestar-connections-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-southeast-1.api.aws" + "url": "https://codestar-connections-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + 
"UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-southeast-1.amazonaws.com" + "url": "https://codestar-connections.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-southeast-1.api.aws" + "url": "https://codestar-connections.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codestar-connections.ap-southeast-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-southeast-2.api.aws" + "url": "https://codestar-connections-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - 
"UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.ap-southeast-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-southeast-2.api.aws" + "url": "https://codestar-connections.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codestar-connections.ap-southeast-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.us-east-1.api.aws" + "url": 
"https://codestar-connections-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-east-1.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.us-east-1.api.aws" + "url": "https://codestar-connections.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack 
disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-east-2.amazonaws.com" - } - }, - "params": { + "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -852,9 +480,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -864,11 +492,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/codestarconnections/src/main/resources/codegen-resources/service-2.json 
b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json index a1316735c544..6a5ab07ced57 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/service-2.json +++ b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"Creates a connection that can then be given to other AWS services like CodePipeline so that it can access third-party code repositories. The connection is in pending status until the third-party connection handshake is completed from the console.
" + "documentation":"Creates a connection that can then be given to other Amazon Web Services services like CodePipeline so that it can access third-party code repositories. The connection is in pending status until the third-party connection handshake is completed from the console.
" }, "CreateHost":{ "name":"CreateHost", @@ -104,6 +104,9 @@ }, "input":{"shape":"ListConnectionsInput"}, "output":{"shape":"ListConnectionsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], "documentation":"Lists the connections associated with your account.
" }, "ListHosts":{ @@ -154,7 +157,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"Removes tags from an AWS resource.
" + "documentation":"Removes tags from an Amazon Web Services resource.
" }, "UpdateHost":{ "name":"UpdateHost", @@ -199,11 +202,11 @@ "members":{ "ConnectionName":{ "shape":"ConnectionName", - "documentation":"The name of the connection. Connection names must be unique in an AWS user account.
" + "documentation":"The name of the connection. Connection names must be unique in an Amazon Web Services account.
" }, "ConnectionArn":{ "shape":"ConnectionArn", - "documentation":"The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between AWS services.
The ARN is never reused if the connection is deleted.
The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between Amazon Web Services.
The ARN is never reused if the connection is deleted.
The Amazon Resource Name (ARN) of the host associated with the connection.
" } }, - "documentation":"A resource that is used to connect third-party source providers with services like AWS CodePipeline.
Note: A connection created through CloudFormation, the CLI, or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by updating the connection in the console.
" + "documentation":"A resource that is used to connect third-party source providers with services like CodePipeline.
Note: A connection created through CloudFormation, the CLI, or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by updating the connection in the console.
" }, "ConnectionArn":{ "type":"string", @@ -258,7 +261,7 @@ }, "ConnectionName":{ "shape":"ConnectionName", - "documentation":"The name of the connection to be created. The name must be unique in the calling AWS account.
" + "documentation":"The name of the connection to be created.
" }, "Tags":{ "shape":"TagList", @@ -276,7 +279,7 @@ "members":{ "ConnectionArn":{ "shape":"ConnectionArn", - "documentation":"The Amazon Resource Name (ARN) of the connection to be created. The ARN is used as the connection reference when the connection is shared between AWS services.
The ARN is never reused if the connection is deleted.
The Amazon Resource Name (ARN) of the connection to be created. The ARN is used as the connection reference when the connection is shared between Amazon Web Services services.
The ARN is never reused if the connection is deleted.
The name of the host to be created. The name must be unique in the calling AWS account.
" + "documentation":"The name of the host to be created.
" }, "ProviderType":{ "shape":"ProviderType", @@ -569,7 +572,8 @@ "enum":[ "Bitbucket", "GitHub", - "GitHubEnterpriseServer" + "GitHubEnterpriseServer", + "GitLab" ] }, "ResourceNotFoundException":{ @@ -628,7 +632,7 @@ "documentation":"The tag's value.
" } }, - "documentation":"A tag is a key-value pair that is used to manage the resource.
This tag is available for use by AWS services that support tags.
" + "documentation":"A tag is a key-value pair that is used to manage the resource.
This tag is available for use by Amazon Web Services services that support tags.
" }, "TagKey":{ "type":"string", @@ -775,5 +779,5 @@ "pattern":"vpc-\\w{8}(\\w{9})?" } }, - "documentation":"This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Connections API. You can use the connections API to work with connections and installations.
Connections are configurations that you use to connect AWS resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.
When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket app. When you create a connection, you can choose an existing installation or create one.
When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections.
You can work with connections by calling:
CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.
DeleteConnection, which deletes the specified connection.
GetConnection, which returns information about the connection, including the connection status.
ListConnections, which lists the connections associated with your account.
You can work with hosts by calling:
CreateHost, which creates a host that represents the infrastructure where your provider is installed.
DeleteHost, which deletes the specified host.
GetHost, which returns information about the host, including the setup status.
ListHosts, which lists the hosts associated with your account.
You can work with tags in AWS CodeStar Connections by calling the following:
ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeStar Connections.
TagResource, which adds or updates tags for a resource in AWS CodeStar Connections.
UntagResource, which removes tags for a resource in AWS CodeStar Connections.
For information about how to use AWS CodeStar Connections, see the Developer Tools User Guide.
" + "documentation":"This Amazon Web Services CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the Amazon Web Services CodeStar Connections API. You can use the connections API to work with connections and installations.
Connections are configurations that you use to connect Amazon Web Services resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.
When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket app. When you create a connection, you can choose an existing installation or create one.
When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections.
You can work with connections by calling:
CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.
DeleteConnection, which deletes the specified connection.
GetConnection, which returns information about the connection, including the connection status.
ListConnections, which lists the connections associated with your account.
You can work with hosts by calling:
CreateHost, which creates a host that represents the infrastructure where your provider is installed.
DeleteHost, which deletes the specified host.
GetHost, which returns information about the host, including the setup status.
ListHosts, which lists the hosts associated with your account.
You can work with tags in Amazon Web Services CodeStar Connections by calling the following:
ListTagsForResource, which gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in Amazon Web Services CodeStar Connections.
TagResource, which adds or updates tags for a resource in Amazon Web Services CodeStar Connections.
UntagResource, which removes tags for a resource in Amazon Web Services CodeStar Connections.
For information about how to use Amazon Web Services CodeStar Connections, see the Developer Tools User Guide.
" } From 34c69530a4e265aa72f23c4f20d4c35af1e0a9ce Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 31 Jul 2023 18:10:25 +0000 Subject: [PATCH 070/270] AWS Clean Rooms Service Update: This release introduces custom SQL queries - an expanded set of SQL you can run. This release adds analysis templates, a new resource for storing pre-defined custom SQL queries ahead of time. This release also adds the Custom analysis rule, which lets you approve analysis templates for querying. --- .../feature-AWSCleanRoomsService-7901fb6.json | 6 + .../codegen-resources/endpoint-tests.json | 86 +- .../codegen-resources/paginators-1.json | 12 + .../codegen-resources/service-2.json | 983 +++++++++++++++++- 4 files changed, 1007 insertions(+), 80 deletions(-) create mode 100644 .changes/next-release/feature-AWSCleanRoomsService-7901fb6.json diff --git a/.changes/next-release/feature-AWSCleanRoomsService-7901fb6.json b/.changes/next-release/feature-AWSCleanRoomsService-7901fb6.json new file mode 100644 index 000000000000..3178db0a74b0 --- /dev/null +++ b/.changes/next-release/feature-AWSCleanRoomsService-7901fb6.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Clean Rooms Service", + "contributor": "", + "description": "This release introduces custom SQL queries - an expanded set of SQL you can run. This release adds analysis templates, a new resource for storing pre-defined custom SQL queries ahead of time. This release also adds the Custom analysis rule, which lets you approve analysis templates for querying." 
+} diff --git a/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json b/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json index 256ebd8e2db9..a128e3bf3db3 100644 --- a/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -110,9 +110,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -123,9 +123,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": 
true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -147,9 +147,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -160,9 +160,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -173,9 +173,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -186,9 +186,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -199,9 +199,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -247,9 +247,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -260,9 +260,9 @@ } }, 
"params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } } diff --git a/services/cleanrooms/src/main/resources/codegen-resources/paginators-1.json b/services/cleanrooms/src/main/resources/codegen-resources/paginators-1.json index 52d02290f001..62fee6a7230f 100644 --- a/services/cleanrooms/src/main/resources/codegen-resources/paginators-1.json +++ b/services/cleanrooms/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,17 @@ { "pagination": { + "ListAnalysisTemplates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "analysisTemplateSummaries" + }, + "ListCollaborationAnalysisTemplates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "collaborationAnalysisTemplateSummaries" + }, "ListCollaborations": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/cleanrooms/src/main/resources/codegen-resources/service-2.json b/services/cleanrooms/src/main/resources/codegen-resources/service-2.json index 6c2c346e4929..5560200444a2 100644 --- a/services/cleanrooms/src/main/resources/codegen-resources/service-2.json +++ b/services/cleanrooms/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,24 @@ "uid":"cleanrooms-2022-02-17" }, "operations":{ + "BatchGetCollaborationAnalysisTemplate":{ + "name":"BatchGetCollaborationAnalysisTemplate", + "http":{ 
+ "method":"POST", + "requestUri":"/collaborations/{collaborationIdentifier}/batch-analysistemplates", + "responseCode":200 + }, + "input":{"shape":"BatchGetCollaborationAnalysisTemplateInput"}, + "output":{"shape":"BatchGetCollaborationAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Retrieves multiple analysis templates within a collaboration by their Amazon Resource Names (ARNs).
" + }, "BatchGetSchema":{ "name":"BatchGetSchema", "http":{ @@ -30,6 +48,26 @@ ], "documentation":"Retrieves multiple schemas by their identifiers.
" }, + "CreateAnalysisTemplate":{ + "name":"CreateAnalysisTemplate", + "http":{ + "method":"POST", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates", + "responseCode":200 + }, + "input":{"shape":"CreateAnalysisTemplateInput"}, + "output":{"shape":"CreateAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Creates a new analysis template.
" + }, "CreateCollaboration":{ "name":"CreateCollaboration", "http":{ @@ -129,6 +167,25 @@ ], "documentation":"Creates a membership for a specific collaboration identifier and joins the collaboration.
" }, + "DeleteAnalysisTemplate":{ + "name":"DeleteAnalysisTemplate", + "http":{ + "method":"DELETE", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates/{analysisTemplateIdentifier}", + "responseCode":204 + }, + "input":{"shape":"DeleteAnalysisTemplateInput"}, + "output":{"shape":"DeleteAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Deletes an analysis template.
", + "idempotent":true + }, "DeleteCollaboration":{ "name":"DeleteCollaboration", "http":{ @@ -247,6 +304,24 @@ "documentation":"Deletes a specified membership. All resources under a membership must be deleted.
", "idempotent":true }, + "GetAnalysisTemplate":{ + "name":"GetAnalysisTemplate", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates/{analysisTemplateIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetAnalysisTemplateInput"}, + "output":{"shape":"GetAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Retrieves an analysis template.
" + }, "GetCollaboration":{ "name":"GetCollaboration", "http":{ @@ -264,6 +339,24 @@ ], "documentation":"Returns metadata about a collaboration.
" }, + "GetCollaborationAnalysisTemplate":{ + "name":"GetCollaborationAnalysisTemplate", + "http":{ + "method":"GET", + "requestUri":"/collaborations/{collaborationIdentifier}/analysistemplates/{analysisTemplateArn}", + "responseCode":200 + }, + "input":{"shape":"GetCollaborationAnalysisTemplateInput"}, + "output":{"shape":"GetCollaborationAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Retrieves an analysis template within a collaboration.
" + }, "GetConfiguredTable":{ "name":"GetConfiguredTable", "http":{ @@ -390,6 +483,42 @@ ], "documentation":"Retrieves a schema analysis rule.
" }, + "ListAnalysisTemplates":{ + "name":"ListAnalysisTemplates", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates", + "responseCode":200 + }, + "input":{"shape":"ListAnalysisTemplatesInput"}, + "output":{"shape":"ListAnalysisTemplatesOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Lists analysis templates that the caller owns.
" + }, + "ListCollaborationAnalysisTemplates":{ + "name":"ListCollaborationAnalysisTemplates", + "http":{ + "method":"GET", + "requestUri":"/collaborations/{collaborationIdentifier}/analysistemplates", + "responseCode":200 + }, + "input":{"shape":"ListCollaborationAnalysisTemplatesInput"}, + "output":{"shape":"ListCollaborationAnalysisTemplatesOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Lists analysis templates within a collaboration.
" + }, "ListCollaborations":{ "name":"ListCollaborations", "http":{ @@ -577,6 +706,24 @@ ], "documentation":"Removes a tag or list of tags from a resource.
" }, + "UpdateAnalysisTemplate":{ + "name":"UpdateAnalysisTemplate", + "http":{ + "method":"PATCH", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates/{analysisTemplateIdentifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateAnalysisTemplateInput"}, + "output":{"shape":"UpdateAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Updates the analysis template metadata.
" + }, "UpdateCollaboration":{ "name":"UpdateCollaboration", "http":{ @@ -795,10 +942,43 @@ "max":100, "min":1 }, + "AnalysisFormat":{ + "type":"string", + "enum":["SQL"] + }, "AnalysisMethod":{ "type":"string", "enum":["DIRECT_QUERY"] }, + "AnalysisParameter":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{ + "shape":"ParameterName", + "documentation":"The name of the parameter. The name must use only alphanumeric, underscore (_), or hyphen (-) characters but cannot start or end with a hyphen.
" + }, + "type":{ + "shape":"ParameterType", + "documentation":"The type of parameter.
" + }, + "defaultValue":{ + "shape":"ParameterValue", + "documentation":"Optional. The default value that is applied in the analysis template. The member who can query can override this value in the query editor.
" + } + }, + "documentation":"Optional. The member who can query can provide this placeholder for a literal data value in an analysis template.
", + "sensitive":true + }, + "AnalysisParameterList":{ + "type":"list", + "member":{"shape":"AnalysisParameter"}, + "max":10, + "min":0 + }, "AnalysisRule":{ "type":"structure", "required":[ @@ -816,7 +996,7 @@ }, "type":{ "shape":"AnalysisRuleType", - "documentation":"The type of analysis rule. Valid values are `AGGREGATION` and `LIST`.
" + "documentation":"The type of analysis rule.
" }, "name":{ "shape":"TableAlias", @@ -876,7 +1056,7 @@ "documentation":"Columns that must meet a specific threshold value (after an aggregation function is applied to it) for each output row to be returned.
" } }, - "documentation":"Enables query structure and specified queries that produce aggregate statistics.
" + "documentation":"A type of analysis rule that enables query structure and specified queries that produce aggregate statistics.
" }, "AnalysisRuleAggregationAggregateColumnsList":{ "type":"list", @@ -893,6 +1073,31 @@ "min":1, "pattern":"[a-z0-9_](([a-z0-9_ ]+-)*([a-z0-9_ ]+))?" }, + "AnalysisRuleCustom":{ + "type":"structure", + "required":["allowedAnalyses"], + "members":{ + "allowedAnalyses":{ + "shape":"AnalysisRuleCustomAllowedAnalysesList", + "documentation":"The analysis templates that are allowed by the custom analysis rule.
" + }, + "allowedAnalysisProviders":{ + "shape":"AnalysisRuleCustomAllowedAnalysisProvidersList", + "documentation":"The Amazon Web Services accounts that are allowed to query by the custom analysis rule. Required when allowedAnalyses is ANY_QUERY.
A type of analysis rule that enables the table owner to approve custom SQL queries on their configured tables.
" + }, + "AnalysisRuleCustomAllowedAnalysesList":{ + "type":"list", + "member":{"shape":"AnalysisTemplateArnOrQueryWildcard"}, + "min":0 + }, + "AnalysisRuleCustomAllowedAnalysisProvidersList":{ + "type":"list", + "member":{"shape":"AccountId"}, + "min":0 + }, "AnalysisRuleList":{ "type":"structure", "required":[ @@ -906,7 +1111,7 @@ }, "allowedJoinOperators":{ "shape":"JoinOperatorsList", - "documentation":"Which logical operators (if any) are to be used in an INNER JOIN match condition. Default is AND.
The logical operators (if any) that are to be used in an INNER JOIN match condition. Default is AND.
Controls on the query specifications that can be run on configured table..
" + "documentation":"Controls on the query specifications that can be run on configured table.
" } }, - "documentation":"Controls on the query specifications that can be run on configured table..
", + "documentation":"Controls on the query specifications that can be run on configured table.
", "union":true }, "AnalysisRulePolicyV1":{ @@ -941,22 +1146,280 @@ "aggregation":{ "shape":"AnalysisRuleAggregation", "documentation":"Analysis rule type that enables only aggregation queries on a configured table.
" + }, + "custom":{ + "shape":"AnalysisRuleCustom", + "documentation":"Analysis rule type that enables custom SQL queries on a configured table.
" } }, - "documentation":"Controls on the query specifications that can be run on configured table..
", + "documentation":"Controls on the query specifications that can be run on configured table.
", "union":true }, "AnalysisRuleType":{ "type":"string", "enum":[ "AGGREGATION", - "LIST" + "LIST", + "CUSTOM" ] }, "AnalysisRuleTypeList":{ "type":"list", "member":{"shape":"AnalysisRuleType"} }, + "AnalysisSchema":{ + "type":"structure", + "members":{ + "referencedTables":{ + "shape":"QueryTables", + "documentation":"The tables referenced in the analysis schema.
" + } + }, + "documentation":"A relation within an analysis.
" + }, + "AnalysisSource":{ + "type":"structure", + "members":{ + "text":{ + "shape":"AnalysisTemplateText", + "documentation":"The query text.
" + } + }, + "documentation":"The structure that defines the body of the analysis template.
", + "sensitive":true, + "union":true + }, + "AnalysisTemplate":{ + "type":"structure", + "required":[ + "id", + "arn", + "collaborationId", + "collaborationArn", + "membershipId", + "membershipArn", + "name", + "createTime", + "updateTime", + "schema", + "format", + "source" + ], + "members":{ + "id":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"The identifier for the analysis template.
" + }, + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"The Amazon Resource Name (ARN) of the analysis template.
" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"The unique ID for the associated collaboration of the analysis template.
" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"The unique ARN for the analysis template’s associated collaboration.
" + }, + "membershipId":{ + "shape":"UUID", + "documentation":"The identifier of a member who created the analysis template.
" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"The Amazon Resource Name (ARN) of the member who created the analysis template.
" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"The description of the analysis template.
" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"The name of the analysis template.
" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"The time that the analysis template was created.
" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"The time that the analysis template was last updated.
" + }, + "schema":{ + "shape":"AnalysisSchema", + "documentation":"The entire schema object.
" + }, + "format":{ + "shape":"AnalysisFormat", + "documentation":"The format of the analysis template.
" + }, + "source":{ + "shape":"AnalysisSource", + "documentation":"The source of the analysis template.
" + }, + "analysisParameters":{ + "shape":"AnalysisParameterList", + "documentation":"The parameters of the analysis template.
" + } + }, + "documentation":"The analysis template.
" + }, + "AnalysisTemplateArn":{ + "type":"string", + "max":200, + "min":0, + "pattern":"arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/analysistemplate/[\\d\\w-]+" + }, + "AnalysisTemplateArnList":{ + "type":"list", + "member":{"shape":"AnalysisTemplateArn"}, + "max":10, + "min":1 + }, + "AnalysisTemplateArnOrQueryWildcard":{ + "type":"string", + "max":200, + "min":0, + "pattern":"(ANY_QUERY|arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/analysistemplate/[\\d\\w-]+)" + }, + "AnalysisTemplateIdentifier":{ + "type":"string", + "max":36, + "min":36, + "pattern":".*[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}.*" + }, + "AnalysisTemplateSummary":{ + "type":"structure", + "required":[ + "arn", + "createTime", + "id", + "name", + "updateTime", + "membershipArn", + "membershipId", + "collaborationArn", + "collaborationId" + ], + "members":{ + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"The Amazon Resource Name (ARN) of the analysis template.
" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"The time that the analysis template summary was created.
" + }, + "id":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"The identifier of the analysis template.
" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"The name of the analysis template.
" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"The time that the analysis template summary was last updated.
" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"The Amazon Resource Name (ARN) of the member who created the analysis template.
" + }, + "membershipId":{ + "shape":"UUID", + "documentation":"The identifier for a membership resource.
" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"The unique ARN for the analysis template summary’s associated collaboration.
" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"A unique identifier for the collaboration that the analysis template summary belongs to. Currently accepts collaboration ID.
" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"The description of the analysis template.
" + } + }, + "documentation":"The metadata of the analysis template.
" + }, + "AnalysisTemplateSummaryList":{ + "type":"list", + "member":{"shape":"AnalysisTemplateSummary"} + }, + "AnalysisTemplateText":{ + "type":"string", + "max":15000, + "min":0 + }, + "BatchGetCollaborationAnalysisTemplateError":{ + "type":"structure", + "required":[ + "arn", + "code", + "message" + ], + "members":{ + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"The Amazon Resource Name (ARN) of the analysis template.
" + }, + "code":{ + "shape":"String", + "documentation":"An error code for the error.
" + }, + "message":{ + "shape":"String", + "documentation":"A description of why the call failed.
" + } + }, + "documentation":"Details of errors thrown by the call to retrieve multiple analysis templates within a collaboration by their identifiers.
" + }, + "BatchGetCollaborationAnalysisTemplateErrorList":{ + "type":"list", + "member":{"shape":"BatchGetCollaborationAnalysisTemplateError"}, + "max":10, + "min":0 + }, + "BatchGetCollaborationAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "collaborationIdentifier", + "analysisTemplateArns" + ], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.
", + "location":"uri", + "locationName":"collaborationIdentifier" + }, + "analysisTemplateArns":{ + "shape":"AnalysisTemplateArnList", + "documentation":"The Amazon Resource Name (ARN) associated with the analysis template within a collaboration.
" + } + } + }, + "BatchGetCollaborationAnalysisTemplateOutput":{ + "type":"structure", + "required":[ + "collaborationAnalysisTemplates", + "errors" + ], + "members":{ + "collaborationAnalysisTemplates":{ + "shape":"CollaborationAnalysisTemplateList", + "documentation":"The retrieved list of analysis templates within a collaboration.
" + }, + "errors":{ + "shape":"BatchGetCollaborationAnalysisTemplateErrorList", + "documentation":"Error reasons for collaboration analysis templates that could not be retrieved. One error is returned for every collaboration analysis template that could not be retrieved.
" + } + } + }, "BatchGetSchemaError":{ "type":"structure", "required":[ @@ -1066,40 +1529,173 @@ "shape":"AccountId", "documentation":"The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.
" }, - "creatorDisplayName":{ - "shape":"DisplayName", - "documentation":"A display name of the collaboration creator.
" + "creatorDisplayName":{ + "shape":"DisplayName", + "documentation":"A display name of the collaboration creator.
" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"The time when the collaboration was created.
" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"The time the collaboration metadata was last updated.
" + }, + "memberStatus":{ + "shape":"MemberStatus", + "documentation":"The status of a member in a collaboration.
" + }, + "membershipId":{ + "shape":"UUID", + "documentation":"The unique ID for your membership within the collaboration.
" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"The unique ARN for your membership within the collaboration.
" + }, + "dataEncryptionMetadata":{ + "shape":"DataEncryptionMetadata", + "documentation":"The settings for client-side encryption for cryptographic computing.
" + }, + "queryLogStatus":{ + "shape":"CollaborationQueryLogStatus", + "documentation":"An indicator as to whether query logging has been enabled or disabled for the collaboration.
" + } + }, + "documentation":"The multi-party data share environment. The collaboration contains metadata about its purpose and participants.
" + }, + "CollaborationAnalysisTemplate":{ + "type":"structure", + "required":[ + "id", + "arn", + "collaborationId", + "collaborationArn", + "creatorAccountId", + "name", + "createTime", + "updateTime", + "schema", + "format", + "source" + ], + "members":{ + "id":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"The identifier of the analysis template.
" + }, + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"The Amazon Resource Name (ARN) of the analysis template.
" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.
" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"The unique ARN for the analysis template’s associated collaboration.
" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"The description of the analysis template.
" + }, + "creatorAccountId":{ + "shape":"AccountId", + "documentation":"The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.
" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"The name of the analysis template.
" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"The time that the analysis template within a collaboration was created.
" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"The time that the analysis template in the collaboration was last updated.
" + }, + "schema":{ + "shape":"AnalysisSchema", + "documentation":"The entire schema object.
" + }, + "format":{ + "shape":"AnalysisFormat", + "documentation":"The format of the analysis template in the collaboration.
" + }, + "source":{ + "shape":"AnalysisSource", + "documentation":"The source of the analysis template within a collaboration.
" + }, + "analysisParameters":{ + "shape":"AnalysisParameterList", + "documentation":"The analysis parameters that have been specified in the analysis template.
" + } + }, + "documentation":"The analysis template within a collaboration.
" + }, + "CollaborationAnalysisTemplateList":{ + "type":"list", + "member":{"shape":"CollaborationAnalysisTemplate"}, + "max":10, + "min":0 + }, + "CollaborationAnalysisTemplateSummary":{ + "type":"structure", + "required":[ + "arn", + "createTime", + "id", + "name", + "updateTime", + "collaborationArn", + "collaborationId", + "creatorAccountId" + ], + "members":{ + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"The Amazon Resource Name (ARN) of the analysis template.
" }, "createTime":{ "shape":"Timestamp", - "documentation":"The time when the collaboration was created.
" + "documentation":"The time that the summary of the analysis template in a collaboration was created.
" + }, + "id":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"The identifier of the analysis template.
" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"The name of the analysis template.
" }, "updateTime":{ "shape":"Timestamp", - "documentation":"The time the collaboration metadata was last updated.
" + "documentation":"The time that the summary of the analysis template in the collaboration was last updated.
" }, - "memberStatus":{ - "shape":"MemberStatus", - "documentation":"The status of a member in a collaboration.
" + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"The unique ARN for the analysis template’s associated collaboration.
" }, - "membershipId":{ + "collaborationId":{ "shape":"UUID", - "documentation":"The unique ID for your membership within the collaboration.
" - }, - "membershipArn":{ - "shape":"MembershipArn", - "documentation":"The unique ARN for your membership within the collaboration.
" + "documentation":"A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.
" }, - "dataEncryptionMetadata":{ - "shape":"DataEncryptionMetadata", - "documentation":"The settings for client-side encryption for cryptographic computing.
" + "creatorAccountId":{ + "shape":"AccountId", + "documentation":"The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.
" }, - "queryLogStatus":{ - "shape":"CollaborationQueryLogStatus", - "documentation":"An indicator as to whether query logging has been enabled or disabled for the collaboration.
" + "description":{ + "shape":"ResourceDescription", + "documentation":"The description of the analysis template.
" } }, - "documentation":"The multi-party data share environment. The collaboration contains metadata about its purpose and participants.
" + "documentation":"The metadata of the analysis template within a collaboration.
" + }, + "CollaborationAnalysisTemplateSummaryList":{ + "type":"list", + "member":{"shape":"CollaborationAnalysisTemplateSummary"} }, "CollaborationArn":{ "type":"string", @@ -1270,7 +1866,7 @@ }, "analysisRuleTypes":{ "shape":"ConfiguredTableAnalysisRuleTypeList", - "documentation":"The types of analysis rules associated with this configured table. Valid values are `AGGREGATION` and `LIST`. Currently, only one analysis rule may be associated with a configured table.
" + "documentation":"The types of analysis rules associated with this configured table. Currently, only one analysis rule may be associated with a configured table.
" }, "analysisMethod":{ "shape":"AnalysisMethod", @@ -1308,7 +1904,7 @@ }, "type":{ "shape":"ConfiguredTableAnalysisRuleType", - "documentation":"The type of configured table analysis rule. Valid values are `AGGREGATION` and `LIST`.
" + "documentation":"The type of configured table analysis rule.
" }, "createTime":{ "shape":"Timestamp", @@ -1342,7 +1938,8 @@ "aggregation":{ "shape":"AnalysisRuleAggregation", "documentation":"Analysis rule type that enables only aggregation queries on a configured table.
" - } + }, + "custom":{"shape":"AnalysisRuleCustom"} }, "documentation":"Controls on the query specifications that can be run on a configured table.
", "union":true @@ -1351,7 +1948,8 @@ "type":"string", "enum":[ "AGGREGATION", - "LIST" + "LIST", + "CUSTOM" ] }, "ConfiguredTableAnalysisRuleTypeList":{ @@ -1362,7 +1960,7 @@ "type":"string", "max":100, "min":0, - "pattern":"arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredTable/[\\d\\w-]+" + "pattern":"arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredtable/[\\d\\w-]+" }, "ConfiguredTableAssociation":{ "type":"structure", @@ -1430,7 +2028,7 @@ "type":"string", "max":100, "min":0, - "pattern":"arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredTableAssociation/[\\d\\w-]+/[\\d\\w-]+" + "pattern":"arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredtableassociation/[\\d\\w-]+/[\\d\\w-]+" }, "ConfiguredTableAssociationIdentifier":{ "type":"string", @@ -1578,6 +2176,57 @@ "INVALID_STATE" ] }, + "CreateAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "name", + "format", + "source" + ], + "members":{ + "description":{ + "shape":"ResourceDescription", + "documentation":"The description of the analysis template.
" + }, + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"The identifier for a membership resource.
", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "name":{ + "shape":"TableAlias", + "documentation":"The name of the analysis template.
" + }, + "format":{ + "shape":"AnalysisFormat", + "documentation":"The format of the analysis template.
" + }, + "source":{ + "shape":"AnalysisSource", + "documentation":"The information in the analysis template. Currently supports text, the query text for the analysis template.
An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.
" + }, + "analysisParameters":{ + "shape":"AnalysisParameterList", + "documentation":"The parameters of the analysis template.
" + } + } + }, + "CreateAnalysisTemplateOutput":{ + "type":"structure", + "required":["analysisTemplate"], + "members":{ + "analysisTemplate":{ + "shape":"AnalysisTemplate", + "documentation":"The analysis template.
" + } + } + }, "CreateCollaborationInput":{ "type":"structure", "required":[ @@ -1649,7 +2298,7 @@ }, "analysisRuleType":{ "shape":"ConfiguredTableAnalysisRuleType", - "documentation":"The type of analysis rule. Valid values are AGGREGATION and LIST.
" + "documentation":"The type of analysis rule.
" }, "analysisRulePolicy":{ "shape":"ConfiguredTableAnalysisRulePolicy", @@ -1818,6 +2467,32 @@ }, "documentation":"The settings for client-side encryption for cryptographic computing.
" }, + "DeleteAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "analysisTemplateIdentifier" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"The identifier for a membership resource.
", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "analysisTemplateIdentifier":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"The identifier for the analysis template resource.
", + "location":"uri", + "locationName":"analysisTemplateIdentifier" + } + } + }, + "DeleteAnalysisTemplateOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteCollaborationInput":{ "type":"structure", "required":["collaborationIdentifier"], @@ -1966,6 +2641,68 @@ "ACTIVE" ] }, + "GetAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "analysisTemplateIdentifier" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"The identifier for a membership resource.
", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "analysisTemplateIdentifier":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"The identifier for the analysis template resource.
", + "location":"uri", + "locationName":"analysisTemplateIdentifier" + } + } + }, + "GetAnalysisTemplateOutput":{ + "type":"structure", + "required":["analysisTemplate"], + "members":{ + "analysisTemplate":{ + "shape":"AnalysisTemplate", + "documentation":"The analysis template.
" + } + } + }, + "GetCollaborationAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "collaborationIdentifier", + "analysisTemplateArn" + ], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.
", + "location":"uri", + "locationName":"collaborationIdentifier" + }, + "analysisTemplateArn":{ + "shape":"AnalysisTemplateArn", + "documentation":"The Amazon Resource Name (ARN) associated with the analysis template within a collaboration.
", + "location":"uri", + "locationName":"analysisTemplateArn" + } + } + }, + "GetCollaborationAnalysisTemplateOutput":{ + "type":"structure", + "required":["collaborationAnalysisTemplate"], + "members":{ + "collaborationAnalysisTemplate":{ + "shape":"CollaborationAnalysisTemplate", + "documentation":"The analysis template within a collaboration.
" + } + } + }, "GetCollaborationInput":{ "type":"structure", "required":["collaborationIdentifier"], @@ -2255,6 +2992,82 @@ "type":"string", "pattern":"[\\w!.*/-]*" }, + "ListAnalysisTemplatesInput":{ + "type":"structure", + "required":["membershipIdentifier"], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"The identifier for a membership resource.
", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"The token value retrieved from a previous call to access the next page of results.
", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"The maximum size of the results that is returned per call.
", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAnalysisTemplatesOutput":{ + "type":"structure", + "required":["analysisTemplateSummaries"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"The token value retrieved from a previous call to access the next page of results.
" + }, + "analysisTemplateSummaries":{ + "shape":"AnalysisTemplateSummaryList", + "documentation":"Lists analysis template metadata.
" + } + } + }, + "ListCollaborationAnalysisTemplatesInput":{ + "type":"structure", + "required":["collaborationIdentifier"], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.
", + "location":"uri", + "locationName":"collaborationIdentifier" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"The token value retrieved from a previous call to access the next page of results.
", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"The maximum size of the results that is returned per call.
", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListCollaborationAnalysisTemplatesOutput":{ + "type":"structure", + "required":["collaborationAnalysisTemplateSummaries"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"The token value retrieved from a previous call to access the next page of results.
" + }, + "collaborationAnalysisTemplateSummaries":{ + "shape":"CollaborationAnalysisTemplateSummaryList", + "documentation":"The metadata of the analysis template within a collaboration.
" + } + } + }, "ListCollaborationsInput":{ "type":"structure", "members":{ @@ -2822,6 +3635,42 @@ "max":10240, "min":0 }, + "ParameterMap":{ + "type":"map", + "key":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValue"} + }, + "ParameterName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[0-9a-zA-Z_]+" + }, + "ParameterType":{ + "type":"string", + "enum":[ + "SMALLINT", + "INTEGER", + "BIGINT", + "DECIMAL", + "REAL", + "DOUBLE_PRECISION", + "BOOLEAN", + "CHAR", + "VARCHAR", + "DATE", + "TIMESTAMP", + "TIMESTAMPTZ", + "TIME", + "TIMETZ", + "VARBYTE" + ] + }, + "ParameterValue":{ + "type":"string", + "max":250, + "min":0 + }, "ProtectedQuery":{ "type":"structure", "required":[ @@ -2990,6 +3839,14 @@ "queryString":{ "shape":"ProtectedQuerySQLParametersQueryStringString", "documentation":"The query string to be submitted.
" + }, + "analysisTemplateArn":{ + "shape":"AnalysisTemplateArn", + "documentation":"The Amazon Resource Name (ARN) associated with the analysis template within a collaboration.
" + }, + "parameters":{ + "shape":"ParameterMap", + "documentation":"The protected query SQL parameters.
" } }, "documentation":"The parameters for the SQL type Protected Query.
", @@ -3063,6 +3920,22 @@ "type":"string", "enum":["SQL"] }, + "QueryTables":{ + "type":"list", + "member":{"shape":"TableAlias"} + }, + "ResourceAlias":{ + "type":"string", + "max":128, + "min":0, + "pattern":"[a-zA-Z0-9_](([a-zA-Z0-9_ ]+-)*([a-zA-Z0-9_ ]+))?" + }, + "ResourceDescription":{ + "type":"string", + "max":255, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*" + }, "ResourceNotFoundException":{ "type":"structure", "required":[ @@ -3158,7 +4031,7 @@ }, "analysisRuleTypes":{ "shape":"AnalysisRuleTypeList", - "documentation":"The analysis rule types associated with the schema. Valued values are LIST and AGGREGATION. Currently, only one entry is present.
" + "documentation":"The analysis rule types associated with the schema. Currently, only one entry is present.
" }, "analysisMethod":{ "shape":"AnalysisMethod", @@ -3454,6 +4327,41 @@ "members":{ } }, + "UpdateAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "analysisTemplateIdentifier" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"The identifier for a membership resource.
", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "analysisTemplateIdentifier":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"The identifier for the analysis template resource.
", + "location":"uri", + "locationName":"analysisTemplateIdentifier" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"A new description for the analysis template.
" + } + } + }, + "UpdateAnalysisTemplateOutput":{ + "type":"structure", + "required":["analysisTemplate"], + "members":{ + "analysisTemplate":{ + "shape":"AnalysisTemplate", + "documentation":"The analysis template.
" + } + } + }, "UpdateCollaborationInput":{ "type":"structure", "required":["collaborationIdentifier"], @@ -3695,7 +4603,8 @@ "enum":[ "FIELD_VALIDATION_FAILED", "INVALID_CONFIGURATION", - "INVALID_QUERY" + "INVALID_QUERY", + "IAM_SYNCHRONIZATION_DELAY" ] } }, From cea7e03eb499f7737bd80aaa8dbd8a5785793eef Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 31 Jul 2023 18:10:28 +0000 Subject: [PATCH 071/270] Amazon Omics Update: Add CreationType filter for ListReadSets --- .../feature-AmazonOmics-a0dd932.json | 6 + .../codegen-resources/endpoint-tests.json | 187 ++++++++++-------- .../codegen-resources/service-2.json | 19 ++ 3 files changed, 128 insertions(+), 84 deletions(-) create mode 100644 .changes/next-release/feature-AmazonOmics-a0dd932.json diff --git a/.changes/next-release/feature-AmazonOmics-a0dd932.json b/.changes/next-release/feature-AmazonOmics-a0dd932.json new file mode 100644 index 000000000000..234dc5915d69 --- /dev/null +++ b/.changes/next-release/feature-AmazonOmics-a0dd932.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "Add CreationType filter for ListReadSets" +} diff --git a/services/omics/src/main/resources/codegen-resources/endpoint-tests.json b/services/omics/src/main/resources/codegen-resources/endpoint-tests.json index de9c6b650010..ce9880720938 100644 --- a/services/omics/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/omics/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,55 +1,55 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://omics-fips.us-gov-east-1.api.aws" + "url": "https://omics-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + 
"UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics-fips.us-gov-east-1.amazonaws.com" + "url": "https://omics-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://omics.us-gov-east-1.api.aws" + "url": "https://omics.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics.us-gov-east-1.amazonaws.com" + "url": "https://omics.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": true, "Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, "Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, "Region": "cn-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -99,109 +99,109 @@ } }, "params": { - "UseDualStack": false, "Region": "cn-north-1", - "UseFIPS": false + "UseFIPS": 
false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://omics-fips.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://omics-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://omics.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics.us-iso-east-1.c2s.ic.gov" + "url": "https://omics.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + 
"UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://omics-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics-fips.us-east-1.amazonaws.com" + "url": "https://omics-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://omics.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics.us-east-1.amazonaws.com" + "url": "https://omics.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -247,22 +247,35 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -272,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid 
Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/omics/src/main/resources/codegen-resources/service-2.json b/services/omics/src/main/resources/codegen-resources/service-2.json index 02478977fc0c..7dcbf00e0ac4 100644 --- a/services/omics/src/main/resources/codegen-resources/service-2.json +++ b/services/omics/src/main/resources/codegen-resources/service-2.json @@ -2625,6 +2625,13 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "CreationType":{ + "type":"string", + "enum":[ + "IMPORT", + "UPLOAD" + ] + }, "DeleteAnnotationStoreRequest":{ "type":"structure", "required":["name"], @@ -3420,6 +3427,10 @@ "statusMessage":{ "shape":"ReadSetStatusMessage", "documentation":"The status message for a read set. It provides more detail as to why the read set has a status.
" + }, + "creationType":{ + "shape":"CreationType", + "documentation":"The creation type of the read set.
" } } }, @@ -5824,6 +5835,10 @@ "generatedFrom":{ "shape":"GeneratedFrom", "documentation":"Where the source originated.
" + }, + "creationType":{ + "shape":"CreationType", + "documentation":"The creation type of the read set.
" } }, "documentation":"A filter for read sets.
" @@ -5924,6 +5939,10 @@ "statusMessage":{ "shape":"ReadSetStatusMessage", "documentation":"The status for a read set. It provides more detail as to why the read set has a status.
" + }, + "creationType":{ + "shape":"CreationType", + "documentation":"The creation type of the read set.
" } }, "documentation":"A read set.
" From c046d0698f6d7a0c6b5856fd374373c82b1333ac Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 31 Jul 2023 18:10:24 +0000 Subject: [PATCH 072/270] Inspector2 Update: This release adds 1 new API: BatchGetFindingDetails to retrieve enhanced vulnerability intelligence details for findings. --- .../feature-Inspector2-0d9bb78.json | 6 + .../codegen-resources/endpoint-rule-set.json | 344 ++++++++---------- .../codegen-resources/service-2.json | 177 ++++++++- 3 files changed, 335 insertions(+), 192 deletions(-) create mode 100644 .changes/next-release/feature-Inspector2-0d9bb78.json diff --git a/.changes/next-release/feature-Inspector2-0d9bb78.json b/.changes/next-release/feature-Inspector2-0d9bb78.json new file mode 100644 index 000000000000..a4e808de9767 --- /dev/null +++ b/.changes/next-release/feature-Inspector2-0d9bb78.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Inspector2", + "contributor": "", + "description": "This release adds 1 new API: BatchGetFindingDetails to retrieve enhanced vulnerability intelligence details for findings." 
+} diff --git a/services/inspector2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/inspector2/src/main/resources/codegen-resources/endpoint-rule-set.json index 303effe98ea7..a8d6cdbbc6f8 100644 --- a/services/inspector2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/inspector2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": 
"booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://inspector2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://inspector2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://inspector2-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - 
"headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://inspector2-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://inspector2.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://inspector2.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://inspector2.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": 
"https://inspector2.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/inspector2/src/main/resources/codegen-resources/service-2.json b/services/inspector2/src/main/resources/codegen-resources/service-2.json index 26d23ea7d676..357ec7f71f47 100644 --- a/services/inspector2/src/main/resources/codegen-resources/service-2.json +++ b/services/inspector2/src/main/resources/codegen-resources/service-2.json @@ -65,6 +65,23 @@ ], "documentation":"Retrieves code snippets from findings that Amazon Inspector detected code vulnerabilities in.
" }, + "BatchGetFindingDetails":{ + "name":"BatchGetFindingDetails", + "http":{ + "method":"POST", + "requestUri":"/findings/details/batch/get", + "responseCode":200 + }, + "input":{"shape":"BatchGetFindingDetailsRequest"}, + "output":{"shape":"BatchGetFindingDetailsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Gets vulnerability details for findings.
" + }, "BatchGetFreeTrialInfo":{ "name":"BatchGetFreeTrialInfo", "http":{ @@ -1492,6 +1509,29 @@ } } }, + "BatchGetFindingDetailsRequest":{ + "type":"structure", + "required":["findingArns"], + "members":{ + "findingArns":{ + "shape":"FindingArnList", + "documentation":"A list of finding ARNs.
" + } + } + }, + "BatchGetFindingDetailsResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"FindingDetailsErrorList", + "documentation":"Error information for findings that details could not be returned for.
" + }, + "findingDetails":{ + "shape":"FindingDetails", + "documentation":"A finding's vulnerability details.
" + } + } + }, "BatchGetFreeTrialInfoRequest":{ "type":"structure", "required":["accountIds"], @@ -2416,7 +2456,7 @@ }, "keyPrefix":{ "shape":"String", - "documentation":"The prefix of the Amazon S3 bucket used to export findings.
" + "documentation":"The prefix that the findings will be written under.
" }, "kmsKeyArn":{ "shape":"String", @@ -2820,6 +2860,40 @@ ] }, "ErrorMessage":{"type":"string"}, + "Evidence":{ + "type":"structure", + "members":{ + "evidenceDetail":{ + "shape":"EvidenceDetail", + "documentation":"The evidence details.
" + }, + "evidenceRule":{ + "shape":"EvidenceRule", + "documentation":"The evidence rule.
" + }, + "severity":{ + "shape":"EvidenceSeverity", + "documentation":"The evidence severity.
" + } + }, + "documentation":"Details of the evidence for a vulnerability identified in a finding.
" + }, + "EvidenceDetail":{ + "type":"string", + "min":0 + }, + "EvidenceList":{ + "type":"list", + "member":{"shape":"Evidence"} + }, + "EvidenceRule":{ + "type":"string", + "min":0 + }, + "EvidenceSeverity":{ + "type":"string", + "min":0 + }, "ExecutionRoleArn":{ "type":"string", "pattern":"^arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" @@ -3123,7 +3197,7 @@ }, "networkProtocol":{ "shape":"StringFilterList", - "documentation":"Details on the ingress source addresses used to filter findings.
" + "documentation":"Details on network protocol used to filter findings.
" }, "portRange":{ "shape":"PortRangeFilterList", @@ -3303,11 +3377,98 @@ "min":1, "pattern":"^arn:(aws[a-zA-Z-]*)?:inspector2:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:finding/[a-f0-9]{32}$" }, + "FindingArnList":{ + "type":"list", + "member":{"shape":"FindingArn"}, + "max":10, + "min":1 + }, "FindingDescription":{ "type":"string", "max":1024, "min":1 }, + "FindingDetail":{ + "type":"structure", + "members":{ + "cisaData":{"shape":"CisaData"}, + "cwes":{ + "shape":"Cwes", + "documentation":"The Common Weakness Enumerations (CWEs) associated with the vulnerability.
" + }, + "epssScore":{ + "shape":"Double", + "documentation":"The Exploit Prediction Scoring System (EPSS) score of the vulnerability.
" + }, + "evidences":{ + "shape":"EvidenceList", + "documentation":"Information on the evidence of the vulnerability.
" + }, + "exploitObserved":{"shape":"ExploitObserved"}, + "findingArn":{ + "shape":"FindingArn", + "documentation":"The finding ARN that the vulnerability details are associated with.
" + }, + "referenceUrls":{ + "shape":"VulnerabilityReferenceUrls", + "documentation":"The reference URLs for the vulnerability data.
" + }, + "riskScore":{ + "shape":"RiskScore", + "documentation":"The risk score of the vulnerability.
" + }, + "tools":{ + "shape":"Tools", + "documentation":"The known malware tools or kits that can exploit the vulnerability.
" + }, + "ttps":{ + "shape":"Ttps", + "documentation":"The MITRE adversary tactics, techniques, or procedures (TTPs) associated with the vulnerability.
" + } + }, + "documentation":"Details of the vulnerability identified in a finding.
" + }, + "FindingDetails":{ + "type":"list", + "member":{"shape":"FindingDetail"}, + "min":0 + }, + "FindingDetailsError":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "findingArn" + ], + "members":{ + "errorCode":{ + "shape":"FindingDetailsErrorCode", + "documentation":"The error code.
" + }, + "errorMessage":{ + "shape":"NonEmptyString", + "documentation":"The error message.
" + }, + "findingArn":{ + "shape":"FindingArn", + "documentation":"The finding ARN that returned an error.
" + } + }, + "documentation":"Details about an error encountered when trying to return vulnerability data for a finding.
" + }, + "FindingDetailsErrorCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "ACCESS_DENIED", + "FINDING_DETAILS_NOT_FOUND", + "INVALID_INPUT" + ] + }, + "FindingDetailsErrorList":{ + "type":"list", + "member":{"shape":"FindingDetailsError"} + }, "FindingList":{ "type":"list", "member":{"shape":"Finding"}, @@ -5283,6 +5444,10 @@ "AWS_LAMBDA_FUNCTION" ] }, + "RiskScore":{ + "type":"integer", + "box":true + }, "Runtime":{ "type":"string", "enum":[ @@ -5812,6 +5977,14 @@ "ALL" ] }, + "Tool":{ + "type":"string", + "min":0 + }, + "Tools":{ + "type":"list", + "member":{"shape":"Tool"} + }, "Ttp":{ "type":"string", "max":30, From f109b88f31daf14ac1920da0df5cad24fa31ab1a Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 31 Jul 2023 18:10:31 +0000 Subject: [PATCH 073/270] Amazon Relational Database Service Update: This release adds support for Aurora MySQL local write forwarding, which allows for forwarding of write operations from reader DB instances to the writer DB instance. --- ...azonRelationalDatabaseService-7a501b8.json | 6 ++++ .../codegen-resources/service-2.json | 30 +++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 .changes/next-release/feature-AmazonRelationalDatabaseService-7a501b8.json diff --git a/.changes/next-release/feature-AmazonRelationalDatabaseService-7a501b8.json b/.changes/next-release/feature-AmazonRelationalDatabaseService-7a501b8.json new file mode 100644 index 000000000000..a7281ae20c14 --- /dev/null +++ b/.changes/next-release/feature-AmazonRelationalDatabaseService-7a501b8.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release adds support for Aurora MySQL local write forwarding, which allows for forwarding of write operations from reader DB instances to the writer DB instance." 
+} diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index ea473cbf6d1b..fbff23ec2620 100644 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -3862,6 +3862,10 @@ "MasterUserSecretKmsKeyId":{ "shape":"String", "documentation":"The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.
This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.
If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.
There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.
Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters
" + }, + "EnableLocalWriteForwarding":{ + "shape":"BooleanOptional", + "documentation":"Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.
Valid for: Aurora DB clusters only
" } }, "documentation":"" @@ -5074,6 +5078,10 @@ "IOOptimizedNextAllowedModificationTime":{ "shape":"TStamp", "documentation":"The next time you can modify the DB cluster to use the aurora-iopt1 storage type.
This setting is only for Aurora DB clusters.
" + }, + "LocalWriteForwardingStatus":{ + "shape":"LocalWriteForwardingStatus", + "documentation":"Specifies whether an Aurora DB cluster has in-cluster write forwarding enabled, not enabled, requested, or is in the process of enabling it.
" } }, "documentation":"Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster.
For an Amazon Aurora DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster.
For a Multi-AZ DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
", @@ -5828,6 +5836,10 @@ "SupportedCACertificateIdentifiers":{ "shape":"CACertificateIdentifiersList", "documentation":"A list of the supported CA certificate identifiers.
For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
" + }, + "SupportsLocalWriteForwarding":{ + "shape":"BooleanOptional", + "documentation":"A value that indicates whether the DB engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.
Valid for: Aurora DB clusters only
" } }, "documentation":"This data type is used as a response element in the action DescribeDBEngineVersions.
Specifies whether engine mode changes from serverless to provisioned are allowed.
Valid for Cluster Type: Aurora Serverless v1 DB clusters only
Constraints:
You must allow engine mode changes when specifying a different value for the EngineMode parameter from the DB cluster's current engine mode.
Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.
Valid for: Aurora DB clusters only
" } }, "documentation":"" @@ -14477,6 +14503,10 @@ "SupportsBabelfish":{ "shape":"BooleanOptional", "documentation":"A value that indicates whether you can use Babelfish for Aurora PostgreSQL with the target engine version.
" + }, + "SupportsLocalWriteForwarding":{ + "shape":"BooleanOptional", + "documentation":"A value that indicates whether the target engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.
Valid for: Aurora DB clusters only
" } }, "documentation":"The version of the database engine that a DB instance can be upgraded to.
" From 7c330c74feed14d535a3ea5929f3a3e81ddd48d9 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 31 Jul 2023 18:10:18 +0000 Subject: [PATCH 074/270] Amazon Lookout for Equipment Update: This release includes new import resource, model versioning and resource policy features. --- ...ure-AmazonLookoutforEquipment-7e4645c.json | 6 + .../codegen-resources/endpoint-rule-set.json | 340 ++++---- .../codegen-resources/endpoint-tests.json | 243 ++++-- .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 737 +++++++++++++++++- 5 files changed, 1099 insertions(+), 232 deletions(-) create mode 100644 .changes/next-release/feature-AmazonLookoutforEquipment-7e4645c.json diff --git a/.changes/next-release/feature-AmazonLookoutforEquipment-7e4645c.json b/.changes/next-release/feature-AmazonLookoutforEquipment-7e4645c.json new file mode 100644 index 000000000000..dbce54545454 --- /dev/null +++ b/.changes/next-release/feature-AmazonLookoutforEquipment-7e4645c.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Lookout for Equipment", + "contributor": "", + "description": "This release includes new import resource, model versioning and resource policy features." 
+} diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json index 11910d47fac1..00e0ece38711 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,64 +45,17 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "ref": "UseFIPS" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + true ] } - ] + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" }, { "conditions": [ @@ -111,19 +63,51 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + 
"conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "booleanEquals", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -133,90 +117,109 @@ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://lookoutequipment-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://lookoutequipment-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - 
true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -229,78 +232,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://lookoutequipment.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://lookoutequipment.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + 
"endpoint": { + "url": "https://lookoutequipment.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://lookoutequipment.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-tests.json b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-tests.json index 6128fb147403..4cc41e239d7a 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,163 +1,299 @@ { "testCases": [ { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment.eu-west-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://lookoutequipment-fips.eu-west-1.api.aws" + "url": "https://lookoutequipment-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.eu-west-1.amazonaws.com" + "url": "https://lookoutequipment-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.eu-west-1.api.aws" + "url": "https://lookoutequipment.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.eu-west-1.amazonaws.com" + "url": "https://lookoutequipment-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": 
"https://lookoutequipment.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.ap-northeast-2.api.aws" + "url": "https://lookoutequipment-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.ap-northeast-2.amazonaws.com" + "url": "https://lookoutequipment-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.ap-northeast-2.api.aws" + "url": "https://lookoutequipment.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and 
DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.ap-northeast-2.amazonaws.com" + "url": "https://lookoutequipment.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.us-east-1.api.aws" + "url": "https://lookoutequipment-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.us-east-1.amazonaws.com" + "url": "https://lookoutequipment.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + 
"error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.us-east-1.api.aws" + "url": "https://lookoutequipment-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.us-east-1.amazonaws.com" + "url": "https://lookoutequipment.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -166,7 +302,6 @@ 
"params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -176,9 +311,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -188,11 +323,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/paginators-1.json b/services/lookoutequipment/src/main/resources/codegen-resources/paginators-1.json index 25be4169143f..18e13f7d3a97 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/paginators-1.json +++ b/services/lookoutequipment/src/main/resources/codegen-resources/paginators-1.json @@ -35,6 +35,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListModelVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListModels": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json b/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json index a98b749de4bc..e6a07920eaa1 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json +++ b/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"Creates a container for a collection of data being ingested for analysis. 
The dataset contains the metadata describing where the data is and what the data actually looks like. In other words, it contains the location of the data source, the data schema, and other information. A dataset also contains any tags associated with the ingested data.
" + "documentation":"Creates a container for a collection of data being ingested for analysis. The dataset contains the metadata describing where the data is and what the data actually looks like. For example, it contains the location of the data source, the data schema, and other information. A dataset also contains any tags associated with the ingested data.
" }, "CreateInferenceScheduler":{ "name":"CreateInferenceScheduler", @@ -191,6 +191,23 @@ ], "documentation":"Deletes an ML model currently available for Amazon Lookout for Equipment. This will prevent it from being used with an inference scheduler, even one that is already set up.
" }, + "DeleteResourcePolicy":{ + "name":"DeleteResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourcePolicyRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"Deletes the resource policy attached to the resource.
" + }, "DescribeDataIngestionJob":{ "name":"DescribeDataIngestionJob", "http":{ @@ -293,6 +310,78 @@ ], "documentation":"Provides a JSON containing the overall information about a specific ML model, including model name and ARN, dataset, training and evaluation information, status, and so on.
" }, + "DescribeModelVersion":{ + "name":"DescribeModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeModelVersionRequest"}, + "output":{"shape":"DescribeModelVersionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Retrieves information about a specific machine learning model version.
" + }, + "DescribeResourcePolicy":{ + "name":"DescribeResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourcePolicyRequest"}, + "output":{"shape":"DescribeResourcePolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Provides the details of a resource policy attached to a resource.
" + }, + "ImportDataset":{ + "name":"ImportDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportDatasetRequest"}, + "output":{"shape":"ImportDatasetResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Imports a dataset.
" + }, + "ImportModelVersion":{ + "name":"ImportModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportModelVersionRequest"}, + "output":{"shape":"ImportModelVersionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Imports a model that has been trained successfully.
" + }, "ListDataIngestionJobs":{ "name":"ListDataIngestionJobs", "http":{ @@ -407,6 +496,23 @@ ], "documentation":"Provides a list of labels.
" }, + "ListModelVersions":{ + "name":"ListModelVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListModelVersionsRequest"}, + "output":{"shape":"ListModelVersionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Generates a list of all model versions for a given model, including the model version, model version ARN, and status. To list a subset of versions, use the MaxModelVersion and MinModelVersion fields.
Lists all the tags for a specified resource, including key and value.
" }, + "PutResourcePolicy":{ + "name":"PutResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutResourcePolicyRequest"}, + "output":{"shape":"PutResourcePolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"Creates a resource control policy for a given resource.
" + }, "StartDataIngestionJob":{ "name":"StartDataIngestionJob", "http":{ @@ -547,6 +672,24 @@ ], "documentation":"Removes a specific tag from a given resource. The tag is specified by its key.
" }, + "UpdateActiveModelVersion":{ + "name":"UpdateActiveModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateActiveModelVersionRequest"}, + "output":{"shape":"UpdateActiveModelVersionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"Sets the active model version for a given machine learning model.
" + }, "UpdateInferenceScheduler":{ "name":"UpdateInferenceScheduler", "http":{ @@ -818,7 +961,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"The ARN of the label group that you have created.
" + "documentation":"The Amazon Resource Name (ARN) of the label group that you have created.
" } } }, @@ -1082,7 +1225,8 @@ "enum":[ "CREATED", "INGESTION_IN_PROGRESS", - "ACTIVE" + "ACTIVE", + "IMPORT_IN_PROGRESS" ] }, "DatasetSummaries":{ @@ -1168,6 +1312,16 @@ } } }, + "DeleteResourcePolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the resource for which the resource policy should be deleted.
" + } + } + }, "DescribeDataIngestionJobRequest":{ "type":"structure", "required":["JobId"], @@ -1229,6 +1383,10 @@ "DataEndTime":{ "shape":"Timestamp", "documentation":"Indicates the latest timestamp corresponding to data that was successfully ingested during this specific ingestion job.
" + }, + "SourceDatasetArn":{ + "shape":"DatasetArn", + "documentation":"The Amazon Resource Name (ARN) of the source dataset from which the data used for the data ingestion job was imported from.
" } } }, @@ -1297,6 +1455,10 @@ "DataEndTime":{ "shape":"Timestamp", "documentation":"Indicates the latest timestamp corresponding to data that was successfully ingested during the most recent ingestion of this particular dataset.
" + }, + "SourceDatasetArn":{ + "shape":"DatasetArn", + "documentation":"The Amazon Resource Name (ARN) of the source dataset from which the current data being described was imported from.
" } } }, @@ -1390,7 +1552,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"The ARN of the label group.
" + "documentation":"The Amazon Resource Name (ARN) of the label group.
" }, "FaultCodes":{ "shape":"FaultCodes", @@ -1432,7 +1594,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"The ARN of the requested label group.
" + "documentation":"The Amazon Resource Name (ARN) of the requested label group.
" }, "LabelId":{ "shape":"LabelId", @@ -1566,6 +1728,201 @@ "OffCondition":{ "shape":"OffCondition", "documentation":"Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.
" + }, + "SourceModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The Amazon Resource Name (ARN) of the source model version. This field appears if the active model version was imported.
" + }, + "ImportJobStartTime":{ + "shape":"Timestamp", + "documentation":"The date and time when the import job was started. This field appears if the active model version was imported.
" + }, + "ImportJobEndTime":{ + "shape":"Timestamp", + "documentation":"The date and time when the import job was completed. This field appears if the active model version was imported.
" + }, + "ActiveModelVersion":{ + "shape":"ModelVersion", + "documentation":"The name of the model version used by the inference schedular when running a scheduled inference execution.
" + }, + "ActiveModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The Amazon Resource Name (ARN) of the model version used by the inference scheduler when running a scheduled inference execution.
" + }, + "ModelVersionActivatedAt":{ + "shape":"Timestamp", + "documentation":"The date the active model version was activated.
" + }, + "PreviousActiveModelVersion":{ + "shape":"ModelVersion", + "documentation":"The model version that was set as the active model version prior to the current active model version.
" + }, + "PreviousActiveModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The ARN of the model version that was set as the active model version prior to the current active model version.
" + }, + "PreviousModelVersionActivatedAt":{ + "shape":"Timestamp", + "documentation":"The date and time when the previous active model version was activated.
" + } + } + }, + "DescribeModelVersionRequest":{ + "type":"structure", + "required":[ + "ModelName", + "ModelVersion" + ], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"The name of the machine learning model that this version belongs to.
" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"The version of the machine learning model.
" + } + } + }, + "DescribeModelVersionResponse":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"The name of the machine learning model that this version belongs to.
" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"The Amazon Resource Name (ARN) of the parent machine learning model that this version belong to.
" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"The version of the machine learning model.
" + }, + "ModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The Amazon Resource Name (ARN) of the model version.
" + }, + "Status":{ + "shape":"ModelVersionStatus", + "documentation":"The current status of the model version.
" + }, + "SourceType":{ + "shape":"ModelVersionSourceType", + "documentation":"Indicates whether this model version was created by training or by importing.
" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"The name of the dataset used to train the model version.
" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"The Amazon Resource Name (ARN) of the dataset used to train the model version.
" + }, + "Schema":{ + "shape":"InlineDataSchema", + "documentation":"The schema of the data used to train the model version.
" + }, + "LabelsInputConfiguration":{"shape":"LabelsInputConfiguration"}, + "TrainingDataStartTime":{ + "shape":"Timestamp", + "documentation":"The date on which the training data began being gathered. If you imported the version, this is the date that the training data in the source version began being gathered.
" + }, + "TrainingDataEndTime":{ + "shape":"Timestamp", + "documentation":"The date on which the training data finished being gathered. If you imported the version, this is the date that the training data in the source version finished being gathered.
" + }, + "EvaluationDataStartTime":{ + "shape":"Timestamp", + "documentation":"The date on which the data in the evaluation set began being gathered. If you imported the version, this is the date that the evaluation set data in the source version began being gathered.
" + }, + "EvaluationDataEndTime":{ + "shape":"Timestamp", + "documentation":"The date on which the data in the evaluation set began being gathered. If you imported the version, this is the date that the evaluation set data in the source version finished being gathered.
" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"The Amazon Resource Name (ARN) of the role that was used to train the model version.
" + }, + "DataPreProcessingConfiguration":{"shape":"DataPreProcessingConfiguration"}, + "TrainingExecutionStartTime":{ + "shape":"Timestamp", + "documentation":"The time when the training of the version began.
" + }, + "TrainingExecutionEndTime":{ + "shape":"Timestamp", + "documentation":"The time when the training of the version completed.
" + }, + "FailedReason":{ + "shape":"BoundedLengthString", + "documentation":"The failure message if the training of the model version failed.
" + }, + "ModelMetrics":{ + "shape":"ModelMetrics", + "documentation":"Shows an aggregated summary, in JSON format, of the model's performance within the evaluation time range. These metrics are created when evaluating the model.
" + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"Indicates the last time the machine learning model version was updated.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"Indicates the time and date at which the machine learning model version was created.
" + }, + "ServerSideKmsKeyId":{ + "shape":"KmsKeyArn", + "documentation":"The identifier of the KMS key key used to encrypt model version data by Amazon Lookout for Equipment.
" + }, + "OffCondition":{ + "shape":"OffCondition", + "documentation":"Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.
" + }, + "SourceModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"If model version was imported, then this field is the arn of the source model version.
" + }, + "ImportJobStartTime":{ + "shape":"Timestamp", + "documentation":"The date and time when the import job began. This field appears if the model version was imported.
" + }, + "ImportJobEndTime":{ + "shape":"Timestamp", + "documentation":"The date and time when the import job completed. This field appears if the model version was imported.
" + }, + "ImportedDataSizeInBytes":{ + "shape":"DataSizeInBytes", + "documentation":"The size in bytes of the imported data. This field appears if the model version was imported.
" + } + } + }, + "DescribeResourcePolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the resource that is associated with the resource policy.
" + } + } + }, + "DescribeResourcePolicyResponse":{ + "type":"structure", + "members":{ + "PolicyRevisionId":{ + "shape":"PolicyRevisionId", + "documentation":"A unique identifier for a revision of the resource policy.
" + }, + "ResourcePolicy":{ + "shape":"Policy", + "documentation":"The resource policy in a JSON-formatted string.
" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"The time when the resource policy was created.
" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"The time when the resource policy was last modified.
" } } }, @@ -1619,6 +1976,122 @@ "min":1, "pattern":"\\p{ASCII}{1,256}" }, + "ImportDatasetRequest":{ + "type":"structure", + "required":[ + "SourceDatasetArn", + "ClientToken" + ], + "members":{ + "SourceDatasetArn":{ + "shape":"DatasetArn", + "documentation":"The Amazon Resource Name (ARN) of the dataset to import.
" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"The name of the machine learning dataset to be created. If the dataset already exists, Amazon Lookout for Equipment overwrites the existing dataset. If you don't specify this field, it is filled with the name of the source dataset.
" + }, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.
", + "idempotencyToken":true + }, + "ServerSideKmsKeyId":{ + "shape":"NameOrArn", + "documentation":"Provides the identifier of the KMS key key used to encrypt model data by Amazon Lookout for Equipment.
" + }, + "Tags":{ + "shape":"TagList", + "documentation":"Any tags associated with the dataset to be created.
" + } + } + }, + "ImportDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"The name of the created machine learning dataset.
" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"The Amazon Resource Name (ARN) of the dataset that was imported.
" + }, + "Status":{ + "shape":"DatasetStatus", + "documentation":"The status of the ImportDataset operation.
A unique identifier for the job of importing the dataset.
" + } + } + }, + "ImportModelVersionRequest":{ + "type":"structure", + "required":[ + "SourceModelVersionArn", + "DatasetName", + "ClientToken" + ], + "members":{ + "SourceModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The Amazon Resource Name (ARN) of the model version to import.
" + }, + "ModelName":{ + "shape":"ModelName", + "documentation":"The name for the machine learning model to be created. If the model already exists, Amazon Lookout for Equipment creates a new version. If you do not specify this field, it is filled with the name of the source model.
" + }, + "DatasetName":{ + "shape":"DatasetIdentifier", + "documentation":"The name of the dataset for the machine learning model being imported.
" + }, + "LabelsInputConfiguration":{"shape":"LabelsInputConfiguration"}, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.
", + "idempotencyToken":true + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"The Amazon Resource Name (ARN) of a role with permission to access the data source being used to create the machine learning model.
" + }, + "ServerSideKmsKeyId":{ + "shape":"NameOrArn", + "documentation":"Provides the identifier of the KMS key key used to encrypt model data by Amazon Lookout for Equipment.
" + }, + "Tags":{ + "shape":"TagList", + "documentation":"The tags associated with the machine learning model to be created.
" + } + } + }, + "ImportModelVersionResponse":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"The name for the machine learning model.
" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"The Amazon Resource Name (ARN) of the model being created.
" + }, + "ModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The Amazon Resource Name (ARN) of the model version being created.
" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"The version of the model being created.
" + }, + "Status":{ + "shape":"ModelVersionStatus", + "documentation":"The status of the ImportModelVersion operation.
The ID number for the AWS KMS key used to encrypt the inference output.
" + "documentation":"The ID number for the KMS key key used to encrypt the inference output.
" } }, "documentation":"Specifies configuration information for the output results from for the inference, including KMS key ID and output S3 location.
" @@ -1908,7 +2381,8 @@ "enum":[ "IN_PROGRESS", "SUCCESS", - "FAILED" + "FAILED", + "IMPORT_IN_PROGRESS" ] }, "IngestionS3InputConfiguration":{ @@ -1925,7 +2399,7 @@ }, "KeyPattern":{ "shape":"KeyPattern", - "documentation":"Pattern for matching the Amazon S3 files which will be used for ingestion. If no KeyPattern is provided, we will use the default hierarchy file structure, which is same as KeyPattern {prefix}/{component_name}/*
" + "documentation":"The pattern for matching the Amazon S3 files that will be used for ingestion. If the schema was created previously without any KeyPattern, then the default KeyPattern {prefix}/{component_name}/* is used to download files from Amazon S3 according to the schema. This field is required when ingestion is being done for the first time.
Valid Values: {prefix}/{component_name}_* | {prefix}/{component_name}/* | {prefix}/{component_name}[DELIMITER]* (Allowed delimiters : space, dot, underscore, hyphen)
" } }, "documentation":"Specifies S3 configuration information for the input data for the data ingestion job.
" @@ -2018,7 +2492,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"The ARN of the label group.
" + "documentation":"The Amazon Resource Name (ARN) of the label group.
" }, "CreatedAt":{ "shape":"Timestamp", @@ -2061,7 +2535,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"The ARN of the label group.
" + "documentation":"The Amazon Resource Name (ARN) of the label group.
" }, "StartTime":{ "shape":"Timestamp", @@ -2235,7 +2709,7 @@ }, "IntervalEndTime":{ "shape":"Timestamp", - "documentation":"Returns all the inference events with an end start time equal to or greater than less than the end time given
" + "documentation":"Returns all the inference events with an end start time equal to or greater than less than the end time given.
" } } }, @@ -2316,7 +2790,7 @@ }, "Status":{ "shape":"InferenceSchedulerStatus", - "documentation":"Specifies the current status of the inference schedulers to list.
" + "documentation":"Specifies the current status of the inference schedulers.
" } } }, @@ -2410,6 +2884,61 @@ } } }, + "ListModelVersionsRequest":{ + "type":"structure", + "required":["ModelName"], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"Then name of the machine learning model for which the model versions are to be listed.
" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"If the total number of results exceeds the limit that the response can display, the response returns an opaque pagination token indicating where to continue the listing of machine learning model versions. Use this token in the NextToken field in the request to list the next page of results.
Specifies the maximum number of machine learning model versions to list.
" + }, + "Status":{ + "shape":"ModelVersionStatus", + "documentation":"Filter the results based on the current status of the model version.
" + }, + "SourceType":{ + "shape":"ModelVersionSourceType", + "documentation":"Filter the results based on the way the model version was generated.
" + }, + "CreatedAtEndTime":{ + "shape":"Timestamp", + "documentation":"Filter results to return all the model versions created before this time.
" + }, + "CreatedAtStartTime":{ + "shape":"Timestamp", + "documentation":"Filter results to return all the model versions created after this time.
" + }, + "MaxModelVersion":{ + "shape":"ModelVersion", + "documentation":"Specifies the highest version of the model to return in the list.
" + }, + "MinModelVersion":{ + "shape":"ModelVersion", + "documentation":"Specifies the lowest version of the model to return in the list.
" + } + } + }, + "ListModelVersionsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"If the total number of results exceeds the limit that the response can display, the response returns an opaque pagination token indicating where to continue the listing of machine learning model versions. Use this token in the NextToken field in the request to list the next page of results.
Provides information on the specified model version, including the created time, model and dataset ARNs, and status.
" + } + } + }, "ListModelsRequest":{ "type":"structure", "members":{ @@ -2563,7 +3092,8 @@ "enum":[ "IN_PROGRESS", "SUCCESS", - "FAILED" + "FAILED", + "IMPORT_IN_PROGRESS" ] }, "ModelSummaries":{ @@ -2596,10 +3126,84 @@ "CreatedAt":{ "shape":"Timestamp", "documentation":"The time at which the specific model was created.
" + }, + "ActiveModelVersion":{ + "shape":"ModelVersion", + "documentation":"The model version that the inference scheduler uses to run an inference execution.
" + }, + "ActiveModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The Amazon Resource Name (ARN) of the model version that is set as active. The active model version is the model version that the inference scheduler uses to run an inference execution.
" } }, "documentation":"Provides information about the specified ML model, including dataset and model names and ARNs, as well as status.
" }, + "ModelVersion":{ + "type":"long", + "min":1 + }, + "ModelVersionArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/.+\\/.+\\/model-version\\/[0-9]{1,}$" + }, + "ModelVersionSourceType":{ + "type":"string", + "enum":[ + "TRAINING", + "RETRAINING", + "IMPORT" + ] + }, + "ModelVersionStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCESS", + "FAILED", + "IMPORT_IN_PROGRESS", + "CANCELED" + ] + }, + "ModelVersionSummaries":{ + "type":"list", + "member":{"shape":"ModelVersionSummary"} + }, + "ModelVersionSummary":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"The name of the model that this model version is a version of.
" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"The Amazon Resource Name (ARN) of the model that this model version is a version of.
" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"The version of the model.
" + }, + "ModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The Amazon Resource Name (ARN) of the model version.
" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"The time when this model version was created.
" + }, + "Status":{ + "shape":"ModelVersionStatus", + "documentation":"The current status of the model version.
" + }, + "SourceType":{ + "shape":"ModelVersionSourceType", + "documentation":"Indicates how this model version was generated.
" + } + }, + "documentation":"Contains information about the specific model version.
" + }, "MonotonicValues":{ "type":"structure", "required":["Status"], @@ -2650,6 +3254,63 @@ "max":2048, "min":1 }, + "Policy":{ + "type":"string", + "max":20000, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "PolicyRevisionId":{ + "type":"string", + "max":50, + "pattern":"[0-9A-Fa-f]+" + }, + "PutResourcePolicyRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "ResourcePolicy", + "ClientToken" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the resource for which the policy is being created.
" + }, + "ResourcePolicy":{ + "shape":"Policy", + "documentation":"The JSON-formatted resource policy to create.
" + }, + "PolicyRevisionId":{ + "shape":"PolicyRevisionId", + "documentation":"A unique identifier for a revision of the resource policy.
" + }, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.
", + "idempotencyToken":true + } + } + }, + "PutResourcePolicyResponse":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the resource for which the policy was created.
" + }, + "PolicyRevisionId":{ + "shape":"PolicyRevisionId", + "documentation":"A unique identifier for a revision of the resource policy.
" + } + } + }, + "ResourceArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:.+" + }, "ResourceNotFoundException":{ "type":"structure", "required":["Message"], @@ -2684,7 +3345,7 @@ }, "Key":{ "shape":"S3Key", - "documentation":"The AWS Key Management Service (AWS KMS) key being used to encrypt the S3 object. Without this key, data in the bucket is not accessible.
" + "documentation":"The Amazon Web Services Key Management Service (KMS key) key being used to encrypt the S3 object. Without this key, data in the bucket is not accessible.
" } }, "documentation":"Contains information about an S3 bucket.
" @@ -3028,6 +3689,52 @@ "members":{ } }, + "UpdateActiveModelVersionRequest":{ + "type":"structure", + "required":[ + "ModelName", + "ModelVersion" + ], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"The name of the machine learning model for which the active model version is being set.
" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"The version of the machine learning model for which the active model version is being set.
" + } + } + }, + "UpdateActiveModelVersionResponse":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"The name of the machine learning model for which the active model version was set.
" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"The Amazon Resource Name (ARN) of the machine learning model for which the active model version was set.
" + }, + "CurrentActiveVersion":{ + "shape":"ModelVersion", + "documentation":"The version that is currently active of the machine learning model for which the active model version was set.
" + }, + "PreviousActiveVersion":{ + "shape":"ModelVersion", + "documentation":"The previous version that was active of the machine learning model for which the active model version was set.
" + }, + "CurrentActiveVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The Amazon Resource Name (ARN) of the machine learning model version that is the current active model version.
" + }, + "PreviousActiveVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"The Amazon Resource Name (ARN) of the machine learning model version that was the previous active model version.
" + } + } + }, "UpdateInferenceSchedulerRequest":{ "type":"structure", "required":["InferenceSchedulerName"], @@ -3078,7 +3785,7 @@ "members":{ "Message":{"shape":"BoundedLengthString"} }, - "documentation":"The input fails to satisfy constraints specified by Amazon Lookout for Equipment or a related AWS service that's being utilized.
", + "documentation":"The input fails to satisfy constraints specified by Amazon Lookout for Equipment or a related Amazon Web Services service that's being utilized.
", "exception":true } }, From f9c319234a9a0d18525d843d9d057a28cc6cd62b Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 31 Jul 2023 18:10:22 +0000 Subject: [PATCH 075/270] Amazon EventBridge Scheduler Update: This release introduces automatic deletion of schedules in EventBridge Scheduler. If configured, EventBridge Scheduler automatically deletes a schedule after the schedule has completed its last invocation. --- ...re-AmazonEventBridgeScheduler-6e2fc14.json | 6 + .../codegen-resources/endpoint-rule-set.json | 398 ++++++++++-------- .../codegen-resources/service-2.json | 25 +- 3 files changed, 252 insertions(+), 177 deletions(-) create mode 100644 .changes/next-release/feature-AmazonEventBridgeScheduler-6e2fc14.json diff --git a/.changes/next-release/feature-AmazonEventBridgeScheduler-6e2fc14.json b/.changes/next-release/feature-AmazonEventBridgeScheduler-6e2fc14.json new file mode 100644 index 000000000000..c41ed34c2dca --- /dev/null +++ b/.changes/next-release/feature-AmazonEventBridgeScheduler-6e2fc14.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon EventBridge Scheduler", + "contributor": "", + "description": "This release introduces automatic deletion of schedules in EventBridge Scheduler. If configured, EventBridge Scheduler automatically deletes a schedule after the schedule has completed its last invocation." 
+} diff --git a/services/scheduler/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/scheduler/src/main/resources/codegen-resources/endpoint-rule-set.json index 353356938684..4913426138fb 100644 --- a/services/scheduler/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/scheduler/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { 
"conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,168 +111,238 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scheduler-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://scheduler-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - 
"conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scheduler-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://scheduler-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", 
+ "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scheduler.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://scheduler.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scheduler.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://scheduler.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/scheduler/src/main/resources/codegen-resources/service-2.json b/services/scheduler/src/main/resources/codegen-resources/service-2.json index 6c44b7c975db..f9dbfdb1ccbf 100644 --- a/services/scheduler/src/main/resources/codegen-resources/service-2.json +++ b/services/scheduler/src/main/resources/codegen-resources/service-2.json @@ -231,6 +231,13 @@ } }, "shapes":{ + "ActionAfterCompletion":{ + "type":"string", + "enum":[ + "NONE", + "DELETE" + ] + }, "AssignPublicIp":{ "type":"string", "enum":[ @@ -356,6 +363,10 @@ "Target" ], "members":{ + "ActionAfterCompletion":{ + "shape":"ActionAfterCompletion", + "documentation":"Specifies the action that EventBridge Scheduler applies to the schedule after the schedule completes invoking the target.
" + }, "ClientToken":{ "shape":"ClientToken", "documentation":"Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, EventBridge Scheduler uses a randomly generated token for the request to ensure idempotency.
", @@ -389,7 +400,7 @@ }, "ScheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"The expression that defines when the schedule runs. The following formats are supported.
at expression - at(yyyy-mm-ddThh:mm:ss)
rate expression - rate(unit value)
cron expression - cron(fields)
You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.
A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).
A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days
For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.
" + "documentation":"The expression that defines when the schedule runs. The following formats are supported.
at expression - at(yyyy-mm-ddThh:mm:ss)
rate expression - rate(value unit)
cron expression - cron(fields)
You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.
A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).
A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days
For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.
" }, "ScheduleExpressionTimezone":{ "shape":"ScheduleExpressionTimezone", @@ -670,6 +681,10 @@ "GetScheduleOutput":{ "type":"structure", "members":{ + "ActionAfterCompletion":{ + "shape":"ActionAfterCompletion", + "documentation":"Indicates the action that EventBridge Scheduler applies to the schedule after the schedule completes invoking the target.
" + }, "Arn":{ "shape":"ScheduleArn", "documentation":"The Amazon Resource Name (ARN) of the schedule.
" @@ -708,7 +723,7 @@ }, "ScheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"The expression that defines when the schedule runs. The following formats are supported.
at expression - at(yyyy-mm-ddThh:mm:ss)
rate expression - rate(unit value)
cron expression - cron(fields)
You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.
A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).
A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days
For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.
" + "documentation":"The expression that defines when the schedule runs. The following formats are supported.
at expression - at(yyyy-mm-ddThh:mm:ss)
rate expression - rate(value unit)
cron expression - cron(fields)
You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.
A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).
A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days
For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.
" }, "ScheduleExpressionTimezone":{ "shape":"ScheduleExpressionTimezone", @@ -1474,6 +1489,10 @@ "Target" ], "members":{ + "ActionAfterCompletion":{ + "shape":"ActionAfterCompletion", + "documentation":"Specifies the action that EventBridge Scheduler applies to the schedule after the schedule completes invoking the target.
" + }, "ClientToken":{ "shape":"ClientToken", "documentation":"Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, EventBridge Scheduler uses a randomly generated token for the request to ensure idempotency.
", @@ -1507,7 +1526,7 @@ }, "ScheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"The expression that defines when the schedule runs. The following formats are supported.
at expression - at(yyyy-mm-ddThh:mm:ss)
rate expression - rate(unit value)
cron expression - cron(fields)
You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.
A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).
A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days
For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.
" + "documentation":"The expression that defines when the schedule runs. The following formats are supported.
at expression - at(yyyy-mm-ddThh:mm:ss)
rate expression - rate(value unit)
cron expression - cron(fields)
You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.
A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).
A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days
For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.
" }, "ScheduleExpressionTimezone":{ "shape":"ScheduleExpressionTimezone", From 49fdec73daafe4d6f108f7907cd41551ae31a79a Mon Sep 17 00:00:00 2001 From: AWS <> Date: Mon, 31 Jul 2023 18:10:20 +0000 Subject: [PATCH 076/270] Amazon Route 53 Update: Amazon Route 53 now supports the Israel (Tel Aviv) Region (il-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. --- .../feature-AmazonRoute53-92b4a66.json | 6 ++++++ .../resources/codegen-resources/service-2.json | 15 +++++++++------ 2 files changed, 15 insertions(+), 6 deletions(-) create mode 100644 .changes/next-release/feature-AmazonRoute53-92b4a66.json diff --git a/.changes/next-release/feature-AmazonRoute53-92b4a66.json b/.changes/next-release/feature-AmazonRoute53-92b4a66.json new file mode 100644 index 000000000000..5705d0e3bc8e --- /dev/null +++ b/.changes/next-release/feature-AmazonRoute53-92b4a66.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Route 53", + "contributor": "", + "description": "Amazon Route 53 now supports the Israel (Tel Aviv) Region (il-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region." +} diff --git a/services/route53/src/main/resources/codegen-resources/service-2.json b/services/route53/src/main/resources/codegen-resources/service-2.json index 80beec518616..f42dd5d8adc5 100644 --- a/services/route53/src/main/resources/codegen-resources/service-2.json +++ b/services/route53/src/main/resources/codegen-resources/service-2.json @@ -300,7 +300,7 @@ {"shape":"NoSuchTrafficPolicy"}, {"shape":"TrafficPolicyInstanceAlreadyExists"} ], - "documentation":"Creates resource record sets in a specified hosted zone based on the settings in a specified traffic policy version. In addition, CreateTrafficPolicyInstance associates the resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). 
Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created.
After you submit an CreateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. Use GetTrafficPolicyInstance with the id of new traffic policy instance to confirm that the CreateTrafficPolicyInstance request completed successfully. For more information, see the State response element.
Creates resource record sets in a specified hosted zone based on the settings in a specified traffic policy version. In addition, CreateTrafficPolicyInstance associates the resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance created.
Gets information about a specified traffic policy instance.
Use GetTrafficPolicyInstance with the id of new traffic policy instance to confirm that the CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request completed successfully. For more information, see the State response element.
In the Route 53 console, traffic policy instances are known as policy records.
Gets information about a specified traffic policy instance.
After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance request, there's a brief delay while Amazon Route 53 creates the resource record sets that are specified in the traffic policy definition. For more information, see the State response element.
In the Route 53 console, traffic policy instances are known as policy records.
After you submit a UpdateTrafficPolicyInstance request, there's a brief delay while Route 53 creates the resource record sets that are specified in the traffic policy definition. Use GetTrafficPolicyInstance with the id of updated traffic policy instance confirm that the UpdateTrafficPolicyInstance request completed successfully. For more information, see the State response element.
Updates the resource record sets in a specified hosted zone that were created based on the settings in a specified traffic policy version.
When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS queries for the root resource record set name (such as example.com) while it replaces one group of resource record sets with another. Route 53 performs the following operations:
Route 53 creates a new group of resource record sets based on the specified traffic policy. This is true regardless of how significant the differences are between the existing resource record sets and the new resource record sets.
When all of the new resource record sets have been created, Route 53 starts to respond to DNS queries for the root resource record set name (such as example.com) by using the new resource record sets.
Route 53 deletes the old group of resource record sets that are associated with the root resource record set name.
Updates the resource record sets in a specified hosted zone that were created based on the settings in a specified traffic policy version.
When you update a traffic policy instance, Amazon Route 53 continues to respond to DNS queries for the root resource record set name (such as example.com) while it replaces one group of resource record sets with another. Route 53 performs the following operations:
Route 53 creates a new group of resource record sets based on the specified traffic policy. This is true regardless of how significant the differences are between the existing resource record sets and the new resource record sets.
When all of the new resource record sets have been created, Route 53 starts to respond to DNS queries for the root resource record set name (such as example.com) by using the new resource record sets.
Route 53 deletes the old group of resource record sets that are associated with the root resource record set name.
Describes an alarm.
" }, + "AlarmList":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "AlarmSpecification":{ + "type":"structure", + "members":{ + "Alarms":{ + "shape":"AlarmList", + "documentation":"The names of one or more CloudWatch alarms to monitor for the instance refresh.
" + } + }, + "documentation":"Specifies the CloudWatch alarm specification to use in an instance refresh.
" + }, "Alarms":{ "type":"list", "member":{"shape":"Alarm"} @@ -4714,7 +4728,7 @@ }, "AutoRollback":{ "shape":"AutoRollback", - "documentation":"(Optional) Indicates whether to roll back the Auto Scaling group to its previous configuration if the instance refresh fails. The default is false.
A rollback is not supported in the following situations:
There is no desired configuration specified for the instance refresh.
The Auto Scaling group has a launch template that uses an Amazon Web Services Systems Manager parameter instead of an AMI ID for the ImageId property.
The Auto Scaling group uses the launch template's $Latest or $Default version.
(Optional) Indicates whether to roll back the Auto Scaling group to its previous configuration if the instance refresh fails or a CloudWatch alarm threshold is met. The default is false.
A rollback is not supported in the following situations:
There is no desired configuration specified for the instance refresh.
The Auto Scaling group has a launch template that uses an Amazon Web Services Systems Manager parameter instead of an AMI ID for the ImageId property.
The Auto Scaling group uses the launch template's $Latest or $Default version.
For more information, see Undo changes with a rollback in the Amazon EC2 Auto Scaling User Guide.
" }, "ScaleInProtectedInstances":{ "shape":"ScaleInProtectedInstances", @@ -4723,6 +4737,10 @@ "StandbyInstances":{ "shape":"StandbyInstances", "documentation":"Choose the behavior that you want Amazon EC2 Auto Scaling to use if instances in Standby state are found.
The following lists the valid values:
Amazon EC2 Auto Scaling terminates instances that are in Standby.
Amazon EC2 Auto Scaling ignores instances that are in Standby and continues to replace instances that are in the InService state.
Amazon EC2 Auto Scaling waits one hour for you to return the instances to service. Otherwise, the instance refresh will fail.
(Optional) The CloudWatch alarm specification. CloudWatch alarms can be used to identify any issues and fail the operation if an alarm threshold is met.
" } }, "documentation":"Describes the preferences for an instance refresh.
" @@ -5191,7 +5209,7 @@ }, "Preferences":{ "shape":"RefreshPreferences", - "documentation":"Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum healthy percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following:
Auto rollback
Checkpoints
Skip matching
Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum healthy percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following:
Auto rollback
Checkpoints
CloudWatch alarms
Skip matching
If specified, monitoring jobs substract this time from the end time. For information about using offsets for scheduling monitoring jobs, see Schedule Model Quality Monitoring Jobs.
" + "documentation":"If specified, monitoring jobs subtract this time from the end time. For information about using offsets for scheduling monitoring jobs, see Schedule Model Quality Monitoring Jobs.
" } }, "documentation":"Input object for the batch transform job.
" @@ -12052,7 +12052,7 @@ "DataCaptureConfig":{"shape":"DataCaptureConfigSummary"}, "EndpointStatus":{ "shape":"EndpointStatus", - "documentation":"The status of the endpoint.
OutOfService: Endpoint is not available to take incoming requests.
Creating: CreateEndpoint is executing.
Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.
SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or deleted or re-scaled until it has completed. This maintenance operation does not change any customer-specified values such as VPC config, KMS encryption, model, instance type, or instance count.
RollingBack: Endpoint fails to scale up or down or change its variant weight and is in the process of rolling back to its previous configuration. Once the rollback completes, endpoint returns to an InService status. This transitional status only applies to an endpoint that has autoscaling enabled and is undergoing variant weight or capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called explicitly.
InService: Endpoint is available to process incoming requests.
Deleting: DeleteEndpoint is executing.
Failed: Endpoint could not be created, updated, or re-scaled. Use the FailureReason value returned by DescribeEndpoint for information about the failure. DeleteEndpoint is the only operation that can be performed on a failed endpoint.
The status of the endpoint.
OutOfService: Endpoint is not available to take incoming requests.
Creating: CreateEndpoint is executing.
Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.
SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or deleted or re-scaled until it has completed. This maintenance operation does not change any customer-specified values such as VPC config, KMS encryption, model, instance type, or instance count.
RollingBack: Endpoint fails to scale up or down or change its variant weight and is in the process of rolling back to its previous configuration. Once the rollback completes, endpoint returns to an InService status. This transitional status only applies to an endpoint that has autoscaling enabled and is undergoing variant weight or capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called explicitly.
InService: Endpoint is available to process incoming requests.
Deleting: DeleteEndpoint is executing.
Failed: Endpoint could not be created, updated, or re-scaled. Use the FailureReason value returned by DescribeEndpoint for information about the failure. DeleteEndpoint is the only operation that can be performed on a failed endpoint.
UpdateRollbackFailed: Both the rolling deployment and auto-rollback failed. Your endpoint is in service with a mix of the old and new endpoint configurations. For information about how to remedy this issue and restore the endpoint's status to InService, see Rolling Deployments.
The model latency percentile threshold.
" + "documentation":"The model latency percentile threshold. For custom load tests, specify the value as P95.
Specifies how many concurrent users to start with.
" + "documentation":"Specifies how many concurrent users to start with. The value should be between 1 and 3.
" }, "SpawnRate":{ "shape":"SpawnRate", @@ -26991,7 +27002,7 @@ }, "DurationInSeconds":{ "shape":"TrafficDurationInSeconds", - "documentation":"Specifies how long traffic phase should be.
" + "documentation":"Specifies how long a traffic phase should be. For custom load tests, the value should be between 120 and 3600. This value should not exceed JobDurationInSeconds.
Defines the traffic pattern.
" @@ -29054,7 +29065,7 @@ }, "JobDurationInSeconds":{ "shape":"JobDurationInSeconds", - "documentation":"Specifies the maximum duration of the job, in seconds.>
" + "documentation":"Specifies the maximum duration of the job, in seconds. The maximum value is 7200.
" }, "TrafficPattern":{ "shape":"TrafficPattern", @@ -29160,6 +29171,10 @@ "ModelLatencyThresholds":{ "shape":"ModelLatencyThresholds", "documentation":"The interval of time taken by a model to respond as viewed from SageMaker. The interval includes the local communication time taken to send the request and to fetch the response from the container of a model and the time taken to complete the inference in the container.
" + }, + "FlatInvocations":{ + "shape":"FlatInvocations", + "documentation":"Stops a load test when the number of invocations (TPS) peaks and flattens, which means that the instance has reached capacity. The default value is Stop. If you want the load test to continue after invocations have flattened, set the value to Continue.
Specifies conditions for stopping a job. When a job reaches a stopping condition limit, SageMaker ends the job.
" @@ -30716,6 +30731,24 @@ "STOPPED" ] }, + "Stairs":{ + "type":"structure", + "members":{ + "DurationInSeconds":{ + "shape":"TrafficDurationInSeconds", + "documentation":"Defines how long each traffic step should be.
" + }, + "NumberOfSteps":{ + "shape":"NumberOfSteps", + "documentation":"Specifies how many steps to perform during traffic.
" + }, + "UsersPerStep":{ + "shape":"UsersPerStep", + "documentation":"Specifies how many new users to spawn in each step.
" + } + }, + "documentation":"Defines the stairs traffic pattern for an Inference Recommender load test. This pattern type consists of multiple steps where the number of users increases at each step.
Specify either the stairs or phases traffic pattern.
" + }, "StartEdgeDeploymentStageRequest":{ "type":"structure", "required":[ @@ -31643,11 +31676,15 @@ "members":{ "TrafficType":{ "shape":"TrafficType", - "documentation":"Defines the traffic patterns.
" + "documentation":"Defines the traffic patterns. Choose either PHASES or STAIRS.
Defines the phases traffic specification.
" + }, + "Stairs":{ + "shape":"Stairs", + "documentation":"Defines the stairs traffic pattern.
" } }, "documentation":"Defines the traffic pattern of the load test.
" @@ -31688,7 +31725,10 @@ }, "TrafficType":{ "type":"string", - "enum":["PHASES"] + "enum":[ + "PHASES", + "STAIRS" + ] }, "TrainingContainerArgument":{ "type":"string", @@ -34333,6 +34373,11 @@ }, "documentation":"A collection of settings that apply to users of Amazon SageMaker Studio. These settings are specified when the CreateUserProfile API is called, and as DefaultUserSettings when the CreateDomain API is called.
SecurityGroups is aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain.
Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources.
In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.
Multi-node parallel jobs aren't supported on Spot Instances.
In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.
To create a compute environment that uses EKS resources, the caller must have permissions to call eks:DescribeCluster.
Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps:
Create a new compute environment with the new AMI.
Add the compute environment to an existing job queue.
Remove the earlier compute environment from your job queue.
Delete the earlier compute environment.
In April 2022, Batch added enhanced support for updating compute environments. For more information, see Updating compute environments. To use the enhanced updating of compute environments to update AMIs, follow these rules:
Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role.
Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE or SPOT_CAPACITY_OPTIMIZED.
Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment.
Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest).
If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated up at the time of the infrastructure update, even if the launchTemplate wasn't updated.
Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources.
In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.
Multi-node parallel jobs aren't supported on Spot Instances.
In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.
To create a compute environment that uses EKS resources, the caller must have permissions to call eks:DescribeCluster.
Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps:
Create a new compute environment with the new AMI.
Add the compute environment to an existing job queue.
Remove the earlier compute environment from your job queue.
Delete the earlier compute environment.
In April 2022, Batch added enhanced support for updating compute environments. For more information, see Updating compute environments. To use the enhanced updating of compute environments to update AMIs, follow these rules:
Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role.
Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED.
Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment.
Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest).
If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated up at the time of the infrastructure update, even if the launchTemplate wasn't updated.
The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.
This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.
Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.
Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.
With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.
The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.
This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.
Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.
Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.
The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.
With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.
The maximum number of vCPUs that a compute environment can support.
With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.
The maximum number of vCPUs that a compute environment can support.
With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.
The maximum number of Amazon EC2 vCPUs that an environment can reach.
With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.
The maximum number of Amazon EC2 vCPUs that an environment can reach.
With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.
The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.
When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment.
This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.
Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.
With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.
The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.
When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment.
This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.
Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.
The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.
With both BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.
The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.
This parameter must be set to X86_64 for Windows containers.
The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.
This parameter must be set to X86_64 for Windows containers.
An object that represents the compute environment architecture for Batch jobs on Fargate.
" From 526ed89282c940da33a35d8ea7f7fc191e48f25f Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 1 Aug 2023 18:07:52 +0000 Subject: [PATCH 083/270] Amazon CloudWatch Internet Monitor Update: This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event. --- ...azonCloudWatchInternetMonitor-97f4c59.json | 6 + .../codegen-resources/endpoint-rule-set.json | 244 ++++++++---------- .../codegen-resources/service-2.json | 89 +++++-- 3 files changed, 175 insertions(+), 164 deletions(-) create mode 100644 .changes/next-release/feature-AmazonCloudWatchInternetMonitor-97f4c59.json diff --git a/.changes/next-release/feature-AmazonCloudWatchInternetMonitor-97f4c59.json b/.changes/next-release/feature-AmazonCloudWatchInternetMonitor-97f4c59.json new file mode 100644 index 000000000000..db8859f891ca --- /dev/null +++ b/.changes/next-release/feature-AmazonCloudWatchInternetMonitor-97f4c59.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon CloudWatch Internet Monitor", + "contributor": "", + "description": "This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event." 
+} diff --git a/services/internetmonitor/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/internetmonitor/src/main/resources/codegen-resources/endpoint-rule-set.json index 25b3af24f3aa..3de3fac3112f 100644 --- a/services/internetmonitor/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/internetmonitor/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -64,18 +64,28 @@ ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -83,19 +93,35 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], "type": "tree", "rules": [ { @@ -110,7 +136,7 @@ { "ref": "PartitionResult" }, - "supportsDualStack" + "supportsFIPS" ] } ] @@ -118,62 +144,10 @@ ], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://internetmonitor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - 
] - }, { "conditions": [], "endpoint": { - "url": "https://internetmonitor.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://internetmonitor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, @@ -183,95 +157,91 @@ }, { "conditions": [], - "type": "tree", - "rules": [ + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://internetmonitor.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "getAttr", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://internetmonitor-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "ref": "PartitionResult" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://internetmonitor.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "supportsFIPS" ] } ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://internetmonitor-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://internetmonitor.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/internetmonitor/src/main/resources/codegen-resources/service-2.json b/services/internetmonitor/src/main/resources/codegen-resources/service-2.json index e1cdcd204a5d..ae9f3da0679d 100644 --- a/services/internetmonitor/src/main/resources/codegen-resources/service-2.json +++ b/services/internetmonitor/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"LimitExceededException"}, {"shape":"ValidationException"} ], - "documentation":"Creates a monitor in Amazon CloudWatch Internet Monitor. A monitor is built based on information from the application resources that you add: Amazon Virtual Private Clouds (VPCs), Amazon CloudFront distributions, and WorkSpaces directories. Internet Monitor then publishes internet measurements from Amazon Web Services that are specific to the city-networks, that is, the locations and ASNs (typically internet service providers or ISPs), where clients access your application. For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide.
When you create a monitor, you set a maximum limit for the number of city-networks where client traffic is monitored. The city-network maximum that you choose is the limit, but you only pay for the number of city-networks that are actually monitored. You can change the maximum at any time by updating your monitor. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide.
", + "documentation":"Creates a monitor in Amazon CloudWatch Internet Monitor. A monitor is built based on information from the application resources that you add: VPCs, Network Load Balancers (NLBs), Amazon CloudFront distributions, and Amazon WorkSpaces directories. Internet Monitor then publishes internet measurements from Amazon Web Services that are specific to the city-networks. That is, the locations and ASNs (typically internet service providers or ISPs), where clients access your application. For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide.
When you create a monitor, you choose the percentage of traffic that you want to monitor. You can also set a maximum limit for the number of city-networks where client traffic is monitored, that caps the total traffic that Internet Monitor monitors. A city-network maximum is the limit of city-networks, but you only pay for the number of city-networks that are actually monitored. You can update your monitor at any time to change the percentage of traffic to monitor or the city-networks maximum. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide.
", "idempotent":true }, "DeleteMonitor":{ @@ -65,7 +65,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"Gets information the Amazon CloudWatch Internet Monitor has created and stored about a health event for a specified monitor. This information includes the impacted locations, and all of the information related to the event by location.
The information returned includes the performance, availability, and round-trip time impact, information about the network providers, the event type, and so on.
Information rolled up at the global traffic level is also returned, including the impact type and total traffic impact.
" + "documentation":"Gets information the Amazon CloudWatch Internet Monitor has created and stored about a health event for a specified monitor. This information includes the impacted locations, and all the information related to the event, by location.
The information returned includes the impact on performance, availability, and round-trip time, information about the network providers (ASNs), the event type, and so on.
Information rolled up at the global traffic level is also returned, including the impact type and total traffic impact.
" }, "GetMonitor":{ "name":"GetMonitor", @@ -99,7 +99,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"Lists all health events for a monitor in Amazon CloudWatch Internet Monitor. Returns all information for health events including the client location information the network cause and status, event start and end time, percentage of total traffic impacted, and status.
Health events that have start times during the time frame that is requested are not included in the list of health events.
Lists all health events for a monitor in Amazon CloudWatch Internet Monitor. Returns information for health events including the event start and end time and the status.
Health events that have start times during the time frame that is requested are not included in the list of health events.
Updates a monitor. You can update a monitor to change the maximum number of city-networks (locations and ASNs or internet service providers), to add or remove resources, or to change the status of the monitor. Note that you can't change the name of a monitor.
The city-network maximum that you choose is the limit, but you only pay for the number of city-networks that are actually monitored. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide.
", + "documentation":"Updates a monitor. You can update a monitor to change the percentage of traffic to monitor or the maximum number of city-networks (locations and ASNs), to add or remove resources, or to change the status of the monitor. Note that you can't change the name of a monitor.
The city-network maximum that you choose is the limit, but you only pay for the number of city-networks that are actually monitored. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide.
", "idempotent":true } }, @@ -223,14 +223,14 @@ }, "PercentOfTotalTrafficImpacted":{ "shape":"Double", - "documentation":"The percentage of impact caused by a health event for total traffic globally.
For information about how Internet Monitor calculates impact, see Inside Internet Monitor in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.
" + "documentation":"The impact on total traffic that a health event has, in increased latency or reduced availability. This is the percentage of how much latency has increased or availability has decreased during the event, compared to what is typical for traffic from this client location to the Amazon Web Services location using this client network.
For information about how Internet Monitor calculates impact, see How Internet Monitor works in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.
" }, "PercentOfClientLocationImpacted":{ "shape":"Double", "documentation":"The percentage of impact caused by a health event for client location traffic globally.
For information about how Internet Monitor calculates impact, see Inside Internet Monitor in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.
" } }, - "documentation":"Measurements about the availability for your application on the internet, calculated by Amazon CloudWatch Internet Monitor. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.
Availability in Internet Monitor represents the estimated percentage of traffic that is not seeing an availability drop. For example, an availability score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing an availability drop for that pair.
For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.
" + "documentation":"Amazon CloudWatch Internet Monitor calculates measurements about the availability for your application's internet traffic between client locations and Amazon Web Services. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.
Availability in Internet Monitor represents the estimated percentage of traffic that is not seeing an availability drop. For example, an availability score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing an availability drop for that pair.
For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.
" }, "BadRequestException":{ "type":"structure", @@ -266,7 +266,7 @@ }, "Resources":{ "shape":"SetOfARNs", - "documentation":"The resources to include in a monitor, which you provide as a set of Amazon Resource Names (ARNs).
You can add a combination of Amazon Virtual Private Clouds (VPCs) and Amazon CloudFront distributions, or you can add Amazon WorkSpaces directories. You can't add all three types of resources.
If you add only VPC resources, at least one VPC must have an Internet Gateway attached to it, to make sure that it has internet connectivity.
The resources to include in a monitor, which you provide as a set of Amazon Resource Names (ARNs). Resources can be VPCs, NLBs, Amazon CloudFront distributions, or Amazon WorkSpaces directories.
You can add a combination of VPCs and CloudFront distributions, or you can add WorkSpaces directories, or you can add NLBs. You can't add NLBs or WorkSpaces directories together with any other resources.
If you add only Amazon VPC resources, at least one VPC must have an Internet Gateway attached to it, to make sure that it has internet connectivity.
The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network or ASN, such as an internet service provider (ISP), that clients access the resources through. This limit helps control billing costs.
To learn more, see Choosing a city-network maximum value in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" + "documentation":"The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the ASN or network provider, such as an internet service provider (ISP), that clients access the resources through. Setting this limit can help control billing costs.
To learn more, see Choosing a city-network maximum value in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" }, "InternetMeasurementsLogDelivery":{ "shape":"InternetMeasurementsLogDelivery", @@ -287,11 +287,11 @@ }, "TrafficPercentageToMonitor":{ "shape":"TrafficPercentageToMonitor", - "documentation":"The percentage of the internet-facing traffic for your application that you want to monitor with this monitor.
" + "documentation":"The percentage of the internet-facing traffic for your application that you want to monitor with this monitor. If you set a city-networks maximum, that limit overrides the traffic percentage that you set.
To learn more, see Choosing an application traffic percentage to monitor in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" }, "HealthEventsConfig":{ "shape":"HealthEventsConfig", - "documentation":"Defines the health event threshold percentages, for performance score and availability score. Internet Monitor creates a health event when there's an internet issue that affects your application end users where a health score percentage is at or below a set threshold. If you don't set a health event threshold, the default calue is 95%.
" + "documentation":"Defines the threshold percentages and other configuration information for when Amazon CloudWatch Internet Monitor creates a health event. Internet Monitor creates a health event when an internet issue that affects your application end users has a health score percentage that is at or below a specific threshold, and, sometimes, when other criteria are met.
If you don't set a health event threshold, the default value is 95%.
For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.
" } } }, @@ -400,7 +400,7 @@ }, "PercentOfTotalTrafficImpacted":{ "shape":"Double", - "documentation":"The impact on total traffic that a health event has.
" + "documentation":"The impact on total traffic that a health event has, in increased latency or reduced availability. This is the percentage of how much latency has increased or availability has decreased during the event, compared to what is typical for traffic from this client location to the Amazon Web Services location using this client network.
" }, "ImpactType":{ "shape":"HealthEventImpactType", @@ -408,7 +408,7 @@ }, "HealthScoreThreshold":{ "shape":"Percentage", - "documentation":"The threshold percentage for health events when Amazon CloudWatch Internet Monitor creates a health event.
" + "documentation":"The threshold percentage for a health score that determines, along with other configuration information, when Internet Monitor creates a health event when there's an internet issue that affects your application end users.
" } } }, @@ -445,7 +445,7 @@ }, "Resources":{ "shape":"SetOfARNs", - "documentation":"The resources that have been added for the monitor. Resources are listed by their Amazon Resource Names (ARNs).
" + "documentation":"The resources monitored by the monitor. Resources are listed by their Amazon Resource Names (ARNs).
" }, "Status":{ "shape":"MonitorConfigState", @@ -473,7 +473,7 @@ }, "MaxCityNetworksToMonitor":{ "shape":"MaxCityNetworksToMonitor", - "documentation":"The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network or ASN, such as an internet service provider (ISP), that clients access the resources through. This limit helps control billing costs.
To learn more, see Choosing a city-network maximum value in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" + "documentation":"The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the ASN or network provider, such as an internet service provider (ISP), that clients access the resources through. This limit can help control billing costs.
To learn more, see Choosing a city-network maximum value in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" }, "InternetMeasurementsLogDelivery":{ "shape":"InternetMeasurementsLogDelivery", @@ -481,11 +481,11 @@ }, "TrafficPercentageToMonitor":{ "shape":"TrafficPercentageToMonitor", - "documentation":"The percentage of the internet-facing traffic for your application that you want to monitor with this monitor.
" + "documentation":"The percentage of the internet-facing traffic for your application to monitor with this monitor. If you set a city-networks maximum, that limit overrides the traffic percentage that you set.
To learn more, see Choosing an application traffic percentage to monitor in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" }, "HealthEventsConfig":{ "shape":"HealthEventsConfig", - "documentation":"The list of health event thresholds. A health event threshold percentage, for performance and availability, determines the level of impact at which Amazon CloudWatch Internet Monitor creates a health event when there's an internet issue that affects your application end users.
" + "documentation":"The list of health event threshold configurations. The threshold percentage for a health score determines, along with other configuration information, when Internet Monitor creates a health event when there's an internet issue that affects your application end users.
For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.
" } } }, @@ -535,7 +535,7 @@ }, "PercentOfTotalTrafficImpacted":{ "shape":"Double", - "documentation":"The impact on global traffic monitored by this monitor for this health event.
" + "documentation":"The impact on total traffic that a health event has, in increased latency or reduced availability. This is the percentage of how much latency has increased or availability has decreased during the event, compared to what is typical for traffic from this client location to the Amazon Web Services location using this client network.
" }, "ImpactType":{ "shape":"HealthEventImpactType", @@ -552,7 +552,9 @@ "type":"string", "enum":[ "AVAILABILITY", - "PERFORMANCE" + "PERFORMANCE", + "LOCAL_AVAILABILITY", + "LOCAL_PERFORMANCE" ] }, "HealthEventList":{ @@ -582,9 +584,17 @@ "PerformanceScoreThreshold":{ "shape":"Percentage", "documentation":"The health event threshold percentage set for performance scores.
" + }, + "AvailabilityLocalHealthEventsConfig":{ + "shape":"LocalHealthEventsConfig", + "documentation":"The configuration that determines the threshold and other conditions for when Internet Monitor creates a health event for a local availability issue.
" + }, + "PerformanceLocalHealthEventsConfig":{ + "shape":"LocalHealthEventsConfig", + "documentation":"The configuration that determines the threshold and other conditions for when Internet Monitor creates a health event for a local performance issue.
" } }, - "documentation":"A complex type for the configuration. Defines the health event threshold percentages, for performance score and availability score. Amazon CloudWatch Internet Monitor creates a health event when there's an internet issue that affects your application end users where a health score percentage is at or below a set threshold. If you don't set a health event threshold, the default value is 95%.
" + "documentation":"A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.
Defines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Amazon CloudWatch Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.
You can also set thresholds for local health scores,, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.
If you don't set a health event threshold, the default value is 95%.
For local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. In addition, you can disable local thresholds, for performance scores, availability scores, or both.
For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.
" }, "ImpactedLocation":{ "type":"structure", @@ -692,7 +702,7 @@ "documentation":"Performance in Internet Monitor represents the estimated percentage of traffic that is not seeing a performance drop. For example, a performance score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing a performance drop for that pair.
For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" } }, - "documentation":"Internet health includes measurements calculated by Amazon CloudWatch Internet Monitor about the performance and availability for your application on the internet. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.
" + "documentation":"Internet health includes measurements calculated by Amazon CloudWatch Internet Monitor about the performance and availability for your application on the internet. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, Internet Monitor reports the information to you in the form of health scores: a performance score and an availability score.
" }, "InternetMeasurementsLogDelivery":{ "type":"structure", @@ -830,6 +840,31 @@ } } }, + "LocalHealthEventsConfig":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"LocalHealthEventsConfigStatus", + "documentation":"The status of whether Internet Monitor creates a health event based on a threshold percentage set for a local health score. The status can be ENABLED or DISABLED.
The health event threshold percentage set for a local health score.
" + }, + "MinTrafficImpact":{ + "shape":"Percentage", + "documentation":"The minimum percentage of overall traffic for an application that must be impacted by an issue before Internet Monitor creates an event when a threshold is crossed for a local health score.
" + } + }, + "documentation":"A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for a local performance or availability issue, when scores cross a threshold for one or more city-networks.
Defines the percentages, for performance scores or availability scores, that are the local thresholds for when Amazon CloudWatch Internet Monitor creates a health event. Also defines whether a local threshold is enabled or disabled, and the minimum percentage of overall traffic that must be impacted by an issue before Internet Monitor creates an event when a threshold is crossed for a local health score.
For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.
" + }, + "LocalHealthEventsConfigStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "LogDeliveryStatus":{ "type":"string", "enum":[ @@ -979,7 +1014,7 @@ }, "PercentOfTotalTrafficImpacted":{ "shape":"Double", - "documentation":"How much performance impact was caused by a health event for total traffic globally. For performance, this is the percentage of how much latency increased during the event compared to typical performance for your application traffic globally.
For more information, see When Amazon Web Services creates and resolves health events in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" + "documentation":"The impact on total traffic that a health event has, in increased latency or reduced availability. This is the percentage of how much latency has increased or availability has decreased during the event, compared to what is typical for traffic from this client location to the Amazon Web Services location using this client network.
For more information, see When Amazon Web Services creates and resolves health events in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" }, "PercentOfClientLocationImpacted":{ "shape":"Double", @@ -990,7 +1025,7 @@ "documentation":"This is the percentage of how much round-trip time increased during the event compared to typical round-trip time for your application for traffic.
For more information, see When Amazon Web Services creates and resolves health events in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" } }, - "documentation":"Measurements about the performance for your application on the internet calculated by Amazon CloudWatch Internet Monitor. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.
Performance in Internet Monitor represents the estimated percentage of traffic that is not seeing a performance drop. For example, a performance score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing a performance drop for that pair.
For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" + "documentation":"Amazon CloudWatch Internet Monitor calculates measurements about the performance for your application's internet traffic between client locations and Amazon Web Services. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.
Performance in Internet Monitor represents the estimated percentage of traffic that is not seeing a performance drop. For example, a performance score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing a performance drop for that pair.
For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" }, "ResourceName":{ "type":"string", @@ -1182,7 +1217,7 @@ }, "ResourcesToAdd":{ "shape":"SetOfARNs", - "documentation":"The resources to include in a monitor, which you provide as a set of Amazon Resource Names (ARNs).
You can add a combination of Amazon Virtual Private Clouds (VPCs) and Amazon CloudFront distributions, or you can add Amazon WorkSpaces directories. You can't add all three types of resources.
If you add only VPC resources, at least one VPC must have an Internet Gateway attached to it, to make sure that it has internet connectivity.
The resources to include in a monitor, which you provide as a set of Amazon Resource Names (ARNs). Resources can be VPCs, NLBs, Amazon CloudFront distributions, or Amazon WorkSpaces directories.
You can add a combination of VPCs and CloudFront distributions, or you can add WorkSpaces directories, or you can add NLBs. You can't add NLBs or WorkSpaces directories together with any other resources.
If you add only Amazon Virtual Private Clouds resources, at least one VPC must have an Internet Gateway attached to it, to make sure that it has internet connectivity.
The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network or ASN, such as an internet service provider, that clients access the resources through.
" + "documentation":"The maximum number of city-networks to monitor for your application. A city-network is the location (city) where clients access your application resources from and the ASN or network provider, such as an internet service provider (ISP), that clients access the resources through. Setting this limit can help control billing costs.
" }, "InternetMeasurementsLogDelivery":{ "shape":"InternetMeasurementsLogDelivery", @@ -1207,11 +1242,11 @@ }, "TrafficPercentageToMonitor":{ "shape":"TrafficPercentageToMonitor", - "documentation":"The percentage of the internet-facing traffic for your application that you want to monitor with this monitor.
" + "documentation":"The percentage of the internet-facing traffic for your application that you want to monitor with this monitor. If you set a city-networks maximum, that limit overrides the traffic percentage that you set.
To learn more, see Choosing an application traffic percentage to monitor in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.
" }, "HealthEventsConfig":{ "shape":"HealthEventsConfig", - "documentation":"The list of health event thresholds. A health event threshold percentage, for performance and availability, determines when Internet Monitor creates a health event when there's an internet issue that affects your application end users.
" + "documentation":"The list of health score thresholds. A threshold percentage for health scores, along with other configuration information, determines when Internet Monitor creates a health event when there's an internet issue that affects your application end users.
For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.
" } } }, @@ -1245,5 +1280,5 @@ "exception":true } }, - "documentation":"Amazon CloudWatch Internet Monitor provides visibility into how internet issues impact the performance and availability between your applications hosted on Amazon Web Services and your end users. It reduces the time it takes for you to diagnose internet issues from days to minutes. Internet Monitor uses the connectivity data that Amazon Web Services captures from its global networking footprint to calculate a baseline of performance and availability for internet traffic. This is the same data that Amazon Web Services uses to monitor internet uptime and availability. With those measurements as a baseline, Internet Monitor raises awareness for you when there are significant problems for your end users in the different geographic locations where your application runs.
Internet Monitor publishes internet measurements to CloudWatch Logs and CloudWatch Metrics, to easily support using CloudWatch tools with health information for geographies and networks specific to your application. Internet Monitor sends health events to Amazon EventBridge so that you can set up notifications. If an issue is caused by the Amazon Web Services network, you also automatically receive an Amazon Web Services Health Dashboard notification with the steps that Amazon Web Services is taking to mitigate the problem.
To use Internet Monitor, you create a monitor and associate your application's resources with it, VPCs, CloudFront distributions, or WorkSpaces directories, to enable Internet Monitor to know where your application's internet traffic is. Internet Monitor then provides internet measurements from Amazon Web Services that are specific to the locations and networks that communicate with your application.
For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide.
" + "documentation":"Amazon CloudWatch Internet Monitor provides visibility into how internet issues impact the performance and availability between your applications hosted on Amazon Web Services and your end users. It can reduce the time it takes for you to diagnose internet issues from days to minutes. Internet Monitor uses the connectivity data that Amazon Web Services captures from its global networking footprint to calculate a baseline of performance and availability for internet traffic. This is the same data that Amazon Web Services uses to monitor internet uptime and availability. With those measurements as a baseline, Internet Monitor raises awareness for you when there are significant problems for your end users in the different geographic locations where your application runs.
Internet Monitor publishes internet measurements to CloudWatch Logs and CloudWatch Metrics, to easily support using CloudWatch tools with health information for geographies and networks specific to your application. Internet Monitor sends health events to Amazon EventBridge so that you can set up notifications. If an issue is caused by the Amazon Web Services network, you also automatically receive an Amazon Web Services Health Dashboard notification with the steps that Amazon Web Services is taking to mitigate the problem.
To use Internet Monitor, you create a monitor and associate your application's resources with it - VPCs, NLBs, CloudFront distributions, or WorkSpaces directories - so Internet Monitor can determine where your application's internet traffic is. Internet Monitor then provides internet measurements from Amazon Web Services that are specific to the locations and ASNs (typically, internet service providers or ISPs) that communicate with your application.
For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide.
" } From f2e03cf6f4ba2eb7f3c66bd90127fc6f10b6a373 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 1 Aug 2023 18:07:53 +0000 Subject: [PATCH 084/270] Amazon Relational Database Service Update: Added support for deleted clusters PiTR. --- ...azonRelationalDatabaseService-4a34c80.json | 6 + .../codegen-resources/endpoint-rule-set.json | 362 ++++++++---------- .../codegen-resources/service-2.json | 269 ++++++++++++- 3 files changed, 424 insertions(+), 213 deletions(-) create mode 100644 .changes/next-release/feature-AmazonRelationalDatabaseService-4a34c80.json diff --git a/.changes/next-release/feature-AmazonRelationalDatabaseService-4a34c80.json b/.changes/next-release/feature-AmazonRelationalDatabaseService-4a34c80.json new file mode 100644 index 000000000000..b430d8ddcdb7 --- /dev/null +++ b/.changes/next-release/feature-AmazonRelationalDatabaseService-4a34c80.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Added support for deleted clusters PiTR." 
+} diff --git a/services/rds/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/rds/src/main/resources/codegen-resources/endpoint-rule-set.json index b9aff9f06c88..00bd15c36104 100644 --- a/services/rds/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/rds/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, 
{ - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://rds-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://rds-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - 
"argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://rds.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://rds-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://rds.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://rds-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://rds.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": 
"https://rds.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://rds.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://rds.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index fbff23ec2620..c50e1eb47d7e 100644 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -673,10 +673,28 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBClusterSnapshotAlreadyExistsFault"}, {"shape":"SnapshotQuotaExceededFault"}, - {"shape":"InvalidDBClusterSnapshotStateFault"} + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"DBClusterAutomatedBackupQuotaExceededFault"} ], "documentation":"The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.
If you're deleting a Multi-AZ DB cluster with read replicas, all cluster members are terminated and read replicas are promoted to standalone instances.
For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.
" }, + "DeleteDBClusterAutomatedBackup":{ + "name":"DeleteDBClusterAutomatedBackup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterAutomatedBackupMessage"}, + "output":{ + "shape":"DeleteDBClusterAutomatedBackupResult", + "resultWrapper":"DeleteDBClusterAutomatedBackupResult" + }, + "errors":[ + {"shape":"InvalidDBClusterAutomatedBackupStateFault"}, + {"shape":"DBClusterAutomatedBackupNotFoundFault"} + ], + "documentation":"Deletes automated backups using the DbClusterResourceId value of the source DB cluster or the Amazon Resource Name (ARN) of the automated backups.
Lists the set of CA certificates provided by Amazon RDS for this Amazon Web Services account.
For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
" }, + "DescribeDBClusterAutomatedBackups":{ + "name":"DescribeDBClusterAutomatedBackups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterAutomatedBackupsMessage"}, + "output":{ + "shape":"DBClusterAutomatedBackupMessage", + "resultWrapper":"DescribeDBClusterAutomatedBackupsResult" + }, + "errors":[ + {"shape":"DBClusterAutomatedBackupNotFoundFault"} + ], + "documentation":"Displays backups for both current and deleted DB clusters. For example, use this operation to find details about automated backups for previously deleted clusters. Current clusters are returned for both the DescribeDBClusterAutomatedBackups and DescribeDBClusters operations.
All parameters are optional.
" + }, "DescribeDBClusterBacktracks":{ "name":"DescribeDBClusterBacktracks", "http":{ @@ -2290,7 +2324,8 @@ {"shape":"OptionGroupNotFoundFault"}, {"shape":"StorageQuotaExceededFault"}, {"shape":"DomainNotFoundFault"}, - {"shape":"DBClusterParameterGroupNotFoundFault"} + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"DBClusterAutomatedBackupNotFoundFault"} ], "documentation":"Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.
For Aurora, this action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.
For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.
For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.
" }, @@ -5099,6 +5134,146 @@ }, "exception":true }, + "DBClusterAutomatedBackup":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"The name of the database engine for this automated backup.
" + }, + "VpcId":{ + "shape":"String", + "documentation":"The VPC ID associated with the DB cluster.
" + }, + "DBClusterAutomatedBackupsArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) for the automated backups.
" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"The identifier for the source DB cluster, which can't be changed and which is unique to an Amazon Web Services Region.
" + }, + "RestoreWindow":{"shape":"RestoreWindow"}, + "MasterUsername":{ + "shape":"String", + "documentation":"The master user name of the automated backup.
" + }, + "DbClusterResourceId":{ + "shape":"String", + "documentation":"The resource ID for the source DB cluster, which can't be changed and which is unique to an Amazon Web Services Region.
" + }, + "Region":{ + "shape":"String", + "documentation":"The Amazon Web Services Region associated with the automated backup.
" + }, + "LicenseModel":{ + "shape":"String", + "documentation":"The license model information for this DB cluster automated backup.
" + }, + "Status":{ + "shape":"String", + "documentation":"A list of status information for an automated backup:
retained - Automated backups for deleted clusters.
True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.
" + }, + "ClusterCreateTime":{ + "shape":"TStamp", + "documentation":"The time when the DB cluster was created, in Universal Coordinated Time (UTC).
" + }, + "StorageEncrypted":{ + "shape":"Boolean", + "documentation":"Specifies whether the source DB cluster is encrypted.
" + }, + "AllocatedStorage":{ + "shape":"Integer", + "documentation":"For all database engines except Amazon Aurora, AllocatedStorage specifies the allocated storage size in gibibytes (GiB). For Aurora, AllocatedStorage always returns 1, because Aurora DB cluster storage size isn't fixed, but instead automatically adjusts as needed.
The version of the database engine for the automated backup.
" + }, + "DBClusterArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) for the source DB cluster.
" + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"The retention period for the automated backups.
" + }, + "EngineMode":{ + "shape":"String", + "documentation":"The engine mode of the database engine for the automated backup.
" + }, + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"The Availability Zones where instances in the DB cluster can be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones.
" + }, + "Port":{ + "shape":"Integer", + "documentation":"The port number that the automated backup used for connections.
Default: Inherits from the source DB cluster
Valid Values: 1150-65535
The Amazon Web Services KMS key ID for an automated backup.
The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.
" + }, + "StorageType":{ + "shape":"String", + "documentation":"The storage type associated with the DB cluster.
This setting is only for non-Aurora Multi-AZ DB clusters.
" + }, + "Iops":{ + "shape":"IntegerOptional", + "documentation":"The IOPS (I/O operations per second) value for the automated backup.
This setting is only for non-Aurora Multi-AZ DB clusters.
" + } + }, + "documentation":"An automated backup of a DB cluster. It consists of system backups, transaction logs, and the database cluster properties that existed at the time you deleted the source cluster.
", + "wrapper":true + }, + "DBClusterAutomatedBackupList":{ + "type":"list", + "member":{ + "shape":"DBClusterAutomatedBackup", + "locationName":"DBClusterAutomatedBackup" + } + }, + "DBClusterAutomatedBackupMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords.
A list of DBClusterAutomatedBackup backups.
No automated backup for this DB cluster was found.
", + "error":{ + "code":"DBClusterAutomatedBackupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterAutomatedBackupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"The quota for retained automated backups was exceeded. This prevents you from retaining any additional automated backups. The retained automated backups quota is the same as your DB cluster quota.
", + "error":{ + "code":"DBClusterAutomatedBackupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "DBClusterBacktrack":{ "type":"structure", "members":{ @@ -5609,7 +5784,7 @@ }, "DBClusterSnapshotArn":{ "shape":"String", - "documentation":"The Amazon Resource Name (ARN) for the DB cluster snapshot.
" + "documentation":"Specifies the Amazon Resource Name (ARN) for the DB cluster snapshot.
" }, "SourceDBClusterSnapshotArn":{ "shape":"String", @@ -5627,6 +5802,10 @@ "StorageType":{ "shape":"String", "documentation":"The storage type associated with the DB cluster snapshot.
This setting is only for Aurora DB clusters.
" + }, + "DbClusterResourceId":{ + "shape":"String", + "documentation":"Specifies the resource ID of the DB cluster that this DB cluster snapshot was created from.
" } }, "documentation":"Contains the details for an Amazon RDS DB cluster snapshot
This data type is used as a response element in the DescribeDBClusterSnapshots action.
The identifier for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region.
" + "documentation":"The resource ID for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region.
" }, "Region":{ "shape":"String", @@ -6226,7 +6405,7 @@ }, "DBInstanceIdentifier":{ "shape":"String", - "documentation":"The customer id of the instance that is/was associated with the automated backup.
" + "documentation":"The identifier for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region.
" }, "RestoreWindow":{ "shape":"RestoreWindow", @@ -6238,7 +6417,7 @@ }, "Status":{ "shape":"String", - "documentation":"Provides a list of status information for an automated backup:
active - automated backups for current instances
retained - automated backups for deleted instances
creating - automated backups that are waiting for the first automated snapshot to be available.
Provides a list of status information for an automated backup:
active - Automated backups for current instances.
retained - Automated backups for deleted instances.
creating - Automated backups that are waiting for the first automated snapshot to be available.
The license model of an automated backup.
" + "documentation":"The master user name of an automated backup.
" }, "Engine":{ "shape":"String", @@ -6365,7 +6544,7 @@ "type":"structure", "members":{ }, - "documentation":"The quota for retained automated backups was exceeded. This prevents you from retaining any additional automated backups. The retained automated backups quota is the same as your DB Instance quota.
", + "documentation":"The quota for retained automated backups was exceeded. This prevents you from retaining any additional automated backups. The retained automated backups quota is the same as your DB instance quota.
", "error":{ "code":"DBInstanceAutomatedBackupQuotaExceeded", "httpStatusCode":400, @@ -7539,6 +7718,22 @@ } } }, + "DeleteDBClusterAutomatedBackupMessage":{ + "type":"structure", + "required":["DbClusterResourceId"], + "members":{ + "DbClusterResourceId":{ + "shape":"String", + "documentation":"The identifier for the source DB cluster, which can't be changed and which is unique to an Amazon Web Services Region.
" + } + } + }, + "DeleteDBClusterAutomatedBackupResult":{ + "type":"structure", + "members":{ + "DBClusterAutomatedBackup":{"shape":"DBClusterAutomatedBackup"} + } + }, "DeleteDBClusterEndpointMessage":{ "type":"structure", "required":["DBClusterEndpointIdentifier"], @@ -7564,6 +7759,10 @@ "FinalDBSnapshotIdentifier":{ "shape":"String", "documentation":"The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled.
Specifying this parameter and also skipping the creation of a final DB cluster snapshot with the SkipFinalSnapshot parameter results in an error.
Constraints:
Must be 1 to 255 letters, numbers, or hyphens.
First character must be a letter.
Can't end with a hyphen or contain two consecutive hyphens
A value that indicates whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted.
" } }, "documentation":"" @@ -7872,6 +8071,31 @@ }, "documentation":"" }, + "DescribeDBClusterAutomatedBackupsMessage":{ + "type":"structure", + "members":{ + "DbClusterResourceId":{ + "shape":"String", + "documentation":"The resource ID of the DB cluster that is the source of the automated backup. This parameter isn't case-sensitive.
" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"(Optional) The user-supplied DB cluster identifier. If this parameter is specified, it must match the identifier of an existing DB cluster. It returns information from the specific DB cluster's automated backup. This parameter isn't case-sensitive.
" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"A filter that specifies which resources to return based on status.
Supported filters are the following:
status
retained - Automated backups for deleted clusters and after backup replication is stopped.
db-cluster-id - Accepts DB cluster identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB cluster automated backups identified by these ARNs.
db-cluster-resource-id - Accepts DB resource identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB cluster resources identified by these ARNs.
Returns all resources by default. The status for each resource is specified in the response.
" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.
The pagination token provided in the previous request. If this parameter is specified the response includes only records beyond the marker, up to MaxRecords.
A value that indicates whether to include manual DB cluster snapshots that are public and can be copied or restored by any Amazon Web Services account. By default, the public snapshots are not included.
You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action.
" + }, + "DbClusterResourceId":{ + "shape":"String", + "documentation":"A specific DB cluster resource ID to describe.
" } }, "documentation":"" @@ -8108,11 +8336,11 @@ }, "DBInstanceIdentifier":{ "shape":"String", - "documentation":"(Optional) The user-supplied instance identifier. If this parameter is specified, it must match the identifier of an existing DB instance. It returns information from the specific DB instance' automated backup. This parameter isn't case-sensitive.
" + "documentation":"(Optional) The user-supplied instance identifier. If this parameter is specified, it must match the identifier of an existing DB instance. It returns information from the specific DB instance's automated backup. This parameter isn't case-sensitive.
" }, "Filters":{ "shape":"FilterList", - "documentation":"A filter that specifies which resources to return based on status.
Supported filters are the following:
status
active - automated backups for current instances
retained - automated backups for deleted instances and after backup replication is stopped
creating - automated backups that are waiting for the first automated snapshot to be available
db-instance-id - Accepts DB instance identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance automated backups identified by these ARNs.
dbi-resource-id - Accepts DB resource identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance resources identified by these ARNs.
Returns all resources by default. The status for each resource is specified in the response.
" + "documentation":"A filter that specifies which resources to return based on status.
Supported filters are the following:
status
active - Automated backups for current instances.
creating - Automated backups that are waiting for the first automated snapshot to be available.
retained - Automated backups for deleted instances and after backup replication is stopped.
db-instance-id - Accepts DB instance identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance automated backups identified by these ARNs.
dbi-resource-id - Accepts DB resource identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance resources identified by these ARNs.
Returns all resources by default. The status for each resource is specified in the response.
" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -9861,6 +10089,18 @@ }, "exception":true }, + "InvalidDBClusterAutomatedBackupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"The automated backup is in an invalid state. For example, this automated backup is associated with an active cluster.
", + "error":{ + "code":"InvalidDBClusterAutomatedBackupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidDBClusterCapacityFault":{ "type":"structure", "members":{ @@ -12926,10 +13166,7 @@ }, "RestoreDBClusterToPointInTimeMessage":{ "type":"structure", - "required":[ - "DBClusterIdentifier", - "SourceDBClusterIdentifier" - ], + "required":["DBClusterIdentifier"], "members":{ "DBClusterIdentifier":{ "shape":"String", @@ -13032,6 +13269,10 @@ "NetworkType":{ "shape":"String", "documentation":"The network type of the DB cluster.
Valid values:
IPV4
DUAL
The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).
For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.
Valid for: Aurora DB clusters only
" + }, + "SourceDbClusterResourceId":{ + "shape":"String", + "documentation":"The resource ID of the source DB cluster from which to restore.
" } }, "documentation":"" From 4f5617488f718f1c177596b156ddac76f9b2180c Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 1 Aug 2023 18:07:56 +0000 Subject: [PATCH 085/270] AWS Database Migration Service Update: Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version. --- ...e-AWSDatabaseMigrationService-3778717.json | 6 ++ .../codegen-resources/paginators-1.json | 5 ++ .../codegen-resources/service-2.json | 84 ++++++++++++++++++- 3 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 .changes/next-release/feature-AWSDatabaseMigrationService-3778717.json diff --git a/.changes/next-release/feature-AWSDatabaseMigrationService-3778717.json b/.changes/next-release/feature-AWSDatabaseMigrationService-3778717.json new file mode 100644 index 000000000000..96c58a4bc78f --- /dev/null +++ b/.changes/next-release/feature-AWSDatabaseMigrationService-3778717.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version." 
+} diff --git a/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json b/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json index b9b289acd546..36ac78fa4eba 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json +++ b/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json @@ -30,6 +30,11 @@ "output_token": "Marker", "limit_key": "MaxRecords" }, + "DescribeEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeEventSubscriptions": { "input_token": "Marker", "output_token": "Marker", diff --git a/services/databasemigration/src/main/resources/codegen-resources/service-2.json b/services/databasemigration/src/main/resources/codegen-resources/service-2.json index 7a32af8d2f64..87494e589dc0 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/service-2.json +++ b/services/databasemigration/src/main/resources/codegen-resources/service-2.json @@ -444,6 +444,16 @@ ], "documentation":"Returns information about the endpoints for your account in the current region.
" }, + "DescribeEngineVersions":{ + "name":"DescribeEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineVersionsMessage"}, + "output":{"shape":"DescribeEngineVersionsResponse"}, + "documentation":"Returns information about the replication instance versions used in the project.
" + }, "DescribeEventCategories":{ "name":"DescribeEventCategories", "http":{ @@ -1272,6 +1282,10 @@ "type":"list", "member":{"shape":"String"} }, + "AvailableUpgradesList":{ + "type":"list", + "member":{"shape":"String"} + }, "BatchStartRecommendationsErrorEntry":{ "type":"structure", "members":{ @@ -2687,6 +2701,32 @@ }, "documentation":"" }, + "DescribeEngineVersionsMessage":{ + "type":"structure", + "members":{ + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
Returned EngineVersion objects that describe the replication instance engine versions used in the project.
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
The version number of the replication instance.
" + }, + "Lifecycle":{ + "shape":"String", + "documentation":"The lifecycle status of the replication instance version. Valid values are DEPRECATED, DEFAULT_VERSION, and ACTIVE.
The release status of the replication instance version.
" + }, + "LaunchDate":{ + "shape":"TStamp", + "documentation":"The date when the replication instance version became publicly available.
" + }, + "AutoUpgradeDate":{ + "shape":"TStamp", + "documentation":"The date when the replication instance will be automatically upgraded. This setting only applies if the auto-minor-version setting is enabled.
The date when the replication instance version will be deprecated and can no longer be requested.
" + }, + "ForceUpgradeDate":{ + "shape":"TStamp", + "documentation":"The date when the replication instance will have a version upgrade forced.
" + }, + "AvailableUpgrades":{ + "shape":"AvailableUpgradesList", + "documentation":"The list of valid replication instance versions that you can upgrade to.
" + } + }, + "documentation":"Provides information about a replication instance version.
" + }, + "EngineVersionList":{ + "type":"list", + "member":{"shape":"EngineVersion"} + }, "Event":{ "type":"structure", "members":{ @@ -5592,7 +5674,7 @@ }, "DatabaseMode":{ "shape":"DatabaseMode", - "documentation":"Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You can use this setting to specify replication behavior for endpoints that require additional configuration, such as Babelfish endpoints.
" + "documentation":"Specifies the default behavior of the replication's handling of PostgreSQL- compatible endpoints that require some additional configuration, such as Babelfish endpoints.
" }, "BabelfishDatabaseName":{ "shape":"String", From 87e2c72d1d2a54598a1ecef09189bd912262270d Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 1 Aug 2023 18:07:53 +0000 Subject: [PATCH 086/270] AWS Elemental MediaLive Update: AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone. --- ...feature-AWSElementalMediaLive-f60ad9b.json | 6 +++ .../codegen-resources/service-2.json | 38 +++++++++++++++++-- 2 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 .changes/next-release/feature-AWSElementalMediaLive-f60ad9b.json diff --git a/.changes/next-release/feature-AWSElementalMediaLive-f60ad9b.json b/.changes/next-release/feature-AWSElementalMediaLive-f60ad9b.json new file mode 100644 index 000000000000..e0ad020b38ce --- /dev/null +++ b/.changes/next-release/feature-AWSElementalMediaLive-f60ad9b.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone." +} diff --git a/services/medialive/src/main/resources/codegen-resources/service-2.json b/services/medialive/src/main/resources/codegen-resources/service-2.json index a82d641f438f..12e6a1f61799 100644 --- a/services/medialive/src/main/resources/codegen-resources/service-2.json +++ b/services/medialive/src/main/resources/codegen-resources/service-2.json @@ -6137,10 +6137,15 @@ "locationName": "uhdDeviceSettings", "documentation": "Settings that describe an input device that is type UHD." }, - "Tags": { + "Tags": { "shape": "Tags", "locationName": "tags", "documentation": "A collection of key-value pairs." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone associated with this input device." 
} }, "documentation": "Placeholder documentation for DescribeInputDeviceResponse" @@ -9788,10 +9793,15 @@ "locationName": "uhdDeviceSettings", "documentation": "Settings that describe an input device that is type UHD." }, - "Tags": { + "Tags": { "shape": "Tags", "locationName": "tags", "documentation": "A collection of key-value pairs." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone associated with this input device." } }, "documentation": "An input device." @@ -10049,10 +10059,15 @@ "locationName": "uhdDeviceSettings", "documentation": "Settings that describe an input device that is type UHD." }, - "Tags": { + "Tags": { "shape": "Tags", "locationName": "tags", "documentation": "A collection of key-value pairs." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone associated with this input device." } }, "documentation": "Details of the input device." @@ -15721,6 +15736,11 @@ "shape": "InputDeviceConfigurableSettings", "locationName": "uhdDeviceSettings", "documentation": "The settings that you want to apply to the UHD input device." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone you want associated with this input device." } }, "documentation": "Updates an input device." @@ -15748,6 +15768,11 @@ "shape": "InputDeviceConfigurableSettings", "locationName": "uhdDeviceSettings", "documentation": "The settings that you want to apply to the UHD input device." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone you want associated with this input device." 
} }, "documentation": "A request to update an input device.", @@ -15818,10 +15843,15 @@ "locationName": "uhdDeviceSettings", "documentation": "Settings that describe an input device that is type UHD." }, - "Tags": { + "Tags": { "shape": "Tags", "locationName": "tags", "documentation": "A collection of key-value pairs." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone associated with this input device." } }, "documentation": "Placeholder documentation for UpdateInputDeviceResponse" From c54db000ff441b0f155f3b66ca6176e8364de83a Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 1 Aug 2023 18:07:57 +0000 Subject: [PATCH 087/270] Amazon Polly Update: Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only. --- .changes/next-release/feature-AmazonPolly-af7d385.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 6 ++++-- 2 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 .changes/next-release/feature-AmazonPolly-af7d385.json diff --git a/.changes/next-release/feature-AmazonPolly-af7d385.json b/.changes/next-release/feature-AmazonPolly-af7d385.json new file mode 100644 index 000000000000..375c126b7ca5 --- /dev/null +++ b/.changes/next-release/feature-AmazonPolly-af7d385.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only." 
+} diff --git a/services/polly/src/main/resources/codegen-resources/service-2.json b/services/polly/src/main/resources/codegen-resources/service-2.json index 7b50fd662216..1b35df9150e3 100644 --- a/services/polly/src/main/resources/codegen-resources/service-2.json +++ b/services/polly/src/main/resources/codegen-resources/service-2.json @@ -423,7 +423,8 @@ "ar-AE", "fi-FI", "en-IE", - "nl-BE" + "nl-BE", + "fr-BE" ] }, "LanguageCodeList":{ @@ -1106,7 +1107,8 @@ "Tomoko", "Niamh", "Sofie", - "Lisa" + "Lisa", + "Isabelle" ] }, "VoiceList":{ From 88abbfb3602c549ecaa4320c5dcfc08b4618c623 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 1 Aug 2023 18:09:12 +0000 Subject: [PATCH 088/270] Updated endpoints.json and partitions.json. --- .../feature-AWSSDKforJavav2-0443982.json | 6 + .../codegen/rules/partitions.json.resource | 2 +- .../regions/internal/region/endpoints.json | 151 +++++++++++++++++- 3 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json new file mode 100644 index 000000000000..e5b5ee3ca5e3 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+} diff --git a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource index 2018b804f3d7..a5b3af1ed634 100644 --- a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource +++ b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource @@ -8,7 +8,7 @@ "supportsDualStack" : true, "supportsFIPS" : true }, - "regionRegex" : "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", "regions" : { "af-south-1" : { "description" : "Africa (Cape Town)" diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index aa5b95cb3f99..77b5d0162f2d 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -21,7 +21,7 @@ "dnsSuffix" : "amazonaws.com", "partition" : "aws", "partitionName" : "AWS Standard", - "regionRegex" : "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", "regions" : { "af-south-1" : { "description" : "Africa (Cape Town)" @@ -83,6 +83,9 @@ "eu-west-3" : { "description" : "Europe (Paris)" }, + "il-central-1" : { + "description" : "Israel (Tel Aviv)" + }, "me-central-1" : { "description" : "Middle East (UAE)" }, @@ -173,6 +176,7 @@ "deprecated" : true, "hostname" : "access-analyzer-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -248,6 +252,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -796,6 +801,12 @@ 
"deprecated" : true, "hostname" : "ecr-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "api.ecr.il-central-1.amazonaws.com" + }, "me-central-1" : { "credentialScope" : { "region" : "me-central-1" @@ -1086,6 +1097,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1294,6 +1306,7 @@ "deprecated" : true, "hostname" : "apigateway-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1359,6 +1372,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1390,6 +1404,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1444,6 +1459,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1859,6 +1875,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1906,6 +1923,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "athena.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ap-southeast-1" : { "variants" : [ { "hostname" : "athena.ap-southeast-1.api.aws", @@ -1924,6 +1947,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "athena.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "athena.ca-central-1.api.aws", @@ -1936,6 +1965,12 @@ "tags" : [ "dualstack" ] } ] }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "athena.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "eu-north-1" : { 
"variants" : [ { "hostname" : "athena.eu-north-1.api.aws", @@ -1948,6 +1983,12 @@ "tags" : [ "dualstack" ] } ] }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "athena.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "eu-west-1" : { "variants" : [ { "hostname" : "athena.eu-west-1.api.aws", @@ -2103,6 +2144,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2551,6 +2593,7 @@ "deprecated" : true, "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2615,6 +2658,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2714,6 +2758,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2787,6 +2832,7 @@ "deprecated" : true, "hostname" : "cloudtrail-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2881,6 +2927,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -3064,6 +3111,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -3717,6 +3765,7 @@ "deprecated" : true, "hostname" : "config-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -3821,6 +3870,8 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, @@ -4533,6 +4584,7 @@ "deprecated" : true, "hostname" : "directconnect-fips.us-west-2.amazonaws.com" }, + 
"il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -4581,6 +4633,7 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, @@ -4642,6 +4695,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -4880,6 +4934,7 @@ "deprecated" : true, "hostname" : "ds-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -4946,6 +5001,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "local" : { "credentialScope" : { "region" : "us-east-1" @@ -5072,6 +5128,7 @@ "deprecated" : true, "hostname" : "ebs-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -5176,6 +5233,7 @@ "deprecated" : true, "hostname" : "ec2-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { @@ -5269,6 +5327,7 @@ "deprecated" : true, "hostname" : "ecs-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -5365,6 +5424,7 @@ "deprecated" : true, "hostname" : "fips.eks.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -5423,6 +5483,7 @@ "deprecated" : true, "hostname" : "elasticache-fips.us-west-1.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -5526,6 +5587,7 @@ "deprecated" : true, "hostname" : "elasticbeanstalk-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -5962,6 +6024,7 @@ "deprecated" : true, "hostname" : "elasticloadbalancing-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, 
"me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6059,6 +6122,7 @@ "deprecated" : true, "hostname" : "elasticmapreduce-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6167,6 +6231,7 @@ }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -6354,6 +6419,7 @@ "deprecated" : true, "hostname" : "es-fips.us-west-1.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6461,6 +6527,7 @@ "deprecated" : true, "hostname" : "events-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6589,6 +6656,7 @@ "deprecated" : true, "hostname" : "firehose-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -7353,6 +7421,7 @@ "deprecated" : true, "hostname" : "glue-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -8016,6 +8085,9 @@ "eu-west-3" : { "hostname" : "internetmonitor.eu-west-3.api.aws" }, + "il-central-1" : { + "hostname" : "internetmonitor.il-central-1.api.aws" + }, "me-central-1" : { "hostname" : "internetmonitor.me-central-1.api.aws" }, @@ -8923,6 +8995,9 @@ "eu-west-3" : { "hostname" : "kendra-ranking.eu-west-3.api.aws" }, + "il-central-1" : { + "hostname" : "kendra-ranking.il-central-1.api.aws" + }, "me-central-1" : { "hostname" : "kendra-ranking.me-central-1.api.aws" }, @@ -9008,6 +9083,7 @@ "deprecated" : true, "hostname" : "kinesis-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -9357,10 +9433,17 @@ "deprecated" : true, "hostname" : "kms-fips.eu-west-3.amazonaws.com" }, + "il-central-1" : { + "variants" : [ { + "hostname" : "kms-fips.il-central-1.amazonaws.com", + "tags" : [ 
"fips" ] + } ] + }, "il-central-1-fips" : { "credentialScope" : { "region" : "il-central-1" }, + "deprecated" : true, "hostname" : "kms-fips.il-central-1.amazonaws.com" }, "me-central-1" : { @@ -9683,6 +9766,12 @@ "deprecated" : true, "hostname" : "lambda-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "variants" : [ { + "hostname" : "lambda.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "me-central-1" : { "variants" : [ { "hostname" : "lambda.me-central-1.api.aws", @@ -10033,6 +10122,7 @@ "deprecated" : true, "hostname" : "logs-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -10639,6 +10729,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -10670,6 +10761,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -10909,6 +11001,7 @@ "deprecated" : true, "hostname" : "monitoring-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -11278,6 +11371,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -11715,6 +11809,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -12264,6 +12359,7 @@ "deprecated" : true, "hostname" : "ram-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -12355,6 +12451,7 @@ "deprecated" : true, "hostname" : "rbin-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -12418,6 +12515,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, 
"me-central-1" : { }, "me-south-1" : { }, "rds-fips.ca-central-1" : { @@ -12689,6 +12787,7 @@ "deprecated" : true, "hostname" : "redshift-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -12978,6 +13077,9 @@ "eu-west-3" : { "hostname" : "resource-explorer-2.eu-west-3.api.aws" }, + "il-central-1" : { + "hostname" : "resource-explorer-2.il-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "resource-explorer-2.sa-east-1.api.aws" }, @@ -13045,6 +13147,7 @@ "deprecated" : true, "hostname" : "resource-groups-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -13285,6 +13388,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -13533,6 +13637,12 @@ "deprecated" : true, "hostname" : "s3-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.il-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "me-central-1" : { "variants" : [ { "hostname" : "s3.dualstack.me-central-1.amazonaws.com", @@ -14074,6 +14184,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -14589,6 +14700,12 @@ "tags" : [ "dualstack" ] } ] }, + "il-central-1" : { + "variants" : [ { + "hostname" : "servicediscovery.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "me-central-1" : { "variants" : [ { "hostname" : "servicediscovery.me-central-1.api.aws", @@ -15219,6 +15336,7 @@ "deprecated" : true, "hostname" : "sns-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -15302,6 +15420,7 @@ "deprecated" : true, "hostname" : "sqs-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, 
@@ -15394,6 +15513,7 @@ "deprecated" : true, "hostname" : "ssm-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -15733,6 +15853,7 @@ "deprecated" : true, "hostname" : "states-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -15881,6 +16002,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "local" : { "credentialScope" : { "region" : "us-east-1" @@ -15925,6 +16047,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -16051,6 +16174,7 @@ "deprecated" : true, "hostname" : "swf-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -16130,6 +16254,7 @@ "deprecated" : true, "hostname" : "synthetics-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -16181,6 +16306,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -17118,6 +17244,7 @@ "credentialScope" : { "region" : "il-central-1" }, + "deprecated" : true, "hostname" : "waf-regional-fips.il-central-1.amazonaws.com" }, "fips-me-central-1" : { @@ -17169,6 +17296,16 @@ "deprecated" : true, "hostname" : "waf-regional-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "waf-regional.il-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.il-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "me-central-1" : { "credentialScope" : { "region" : "me-central-1" @@ -17587,6 +17724,7 @@ "credentialScope" : { "region" : "il-central-1" }, + "deprecated" : true, "hostname" : "wafv2-fips.il-central-1.amazonaws.com" }, 
"fips-me-central-1" : { @@ -17638,6 +17776,16 @@ "deprecated" : true, "hostname" : "wafv2-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "wafv2.il-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.il-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "me-central-1" : { "credentialScope" : { "region" : "me-central-1" @@ -17911,6 +18059,7 @@ "deprecated" : true, "hostname" : "xray-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, From 96a4e844a0d296f3df2e9b2f43cba078371aa802 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Tue, 1 Aug 2023 18:10:15 +0000 Subject: [PATCH 089/270] Release 2.20.117. Updated CHANGELOG.md, README.md and all pom.xml. --- .changes/2.20.117.json | 54 +++++++++++++++++++ .../feature-AWSBatch-f3e1441.json | 6 --- ...e-AWSDatabaseMigrationService-3778717.json | 6 --- ...feature-AWSElementalMediaLive-f60ad9b.json | 6 --- .../feature-AWSSDKforJavav2-0443982.json | 6 --- ...azonCloudWatchInternetMonitor-97f4c59.json | 6 --- .../feature-AmazonPolly-af7d385.json | 6 --- ...azonRelationalDatabaseService-4a34c80.json | 6 --- ...eature-AmazonSageMakerService-c9291c9.json | 6 --- CHANGELOG.md | 33 ++++++++++++ README.md | 8 +-- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 2 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml 
| 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- 
services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 
+- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 +- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- 
services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml 
| 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- 
services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 2 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- 
.../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- 
test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- 427 files changed, 507 insertions(+), 468 deletions(-) create mode 100644 .changes/2.20.117.json delete mode 100644 .changes/next-release/feature-AWSBatch-f3e1441.json delete mode 100644 .changes/next-release/feature-AWSDatabaseMigrationService-3778717.json delete mode 100644 .changes/next-release/feature-AWSElementalMediaLive-f60ad9b.json delete mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json delete mode 100644 .changes/next-release/feature-AmazonCloudWatchInternetMonitor-97f4c59.json delete mode 100644 .changes/next-release/feature-AmazonPolly-af7d385.json delete mode 100644 .changes/next-release/feature-AmazonRelationalDatabaseService-4a34c80.json delete mode 100644 .changes/next-release/feature-AmazonSageMakerService-c9291c9.json diff --git a/.changes/2.20.117.json b/.changes/2.20.117.json new file mode 100644 index 000000000000..6875a98fc8ca --- /dev/null +++ b/.changes/2.20.117.json @@ -0,0 +1,54 @@ +{ + "version": "2.20.117", + "date": "2023-08-01", + "entries": [ + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "This release adds support for price capacity optimized allocation strategy for Spot Instances." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version." 
+ }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Internet Monitor", + "contributor": "", + "description": "This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event." + }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Added support for deleted clusters PiTR." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Add Stairs TrafficPattern and FlatInvocations to RecommendationJobStoppingConditions" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AWSBatch-f3e1441.json b/.changes/next-release/feature-AWSBatch-f3e1441.json deleted file mode 100644 index 6775f12f882c..000000000000 --- a/.changes/next-release/feature-AWSBatch-f3e1441.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Batch", - "contributor": "", - "description": "This release adds support for price capacity optimized allocation strategy for Spot Instances." 
-} diff --git a/.changes/next-release/feature-AWSDatabaseMigrationService-3778717.json b/.changes/next-release/feature-AWSDatabaseMigrationService-3778717.json deleted file mode 100644 index 96c58a4bc78f..000000000000 --- a/.changes/next-release/feature-AWSDatabaseMigrationService-3778717.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Database Migration Service", - "contributor": "", - "description": "Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version." -} diff --git a/.changes/next-release/feature-AWSElementalMediaLive-f60ad9b.json b/.changes/next-release/feature-AWSElementalMediaLive-f60ad9b.json deleted file mode 100644 index e0ad020b38ce..000000000000 --- a/.changes/next-release/feature-AWSElementalMediaLive-f60ad9b.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Elemental MediaLive", - "contributor": "", - "description": "AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone." -} diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json deleted file mode 100644 index e5b5ee3ca5e3..000000000000 --- a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS SDK for Java v2", - "contributor": "", - "description": "Updated endpoint and partition metadata." 
-} diff --git a/.changes/next-release/feature-AmazonCloudWatchInternetMonitor-97f4c59.json b/.changes/next-release/feature-AmazonCloudWatchInternetMonitor-97f4c59.json deleted file mode 100644 index db8859f891ca..000000000000 --- a/.changes/next-release/feature-AmazonCloudWatchInternetMonitor-97f4c59.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon CloudWatch Internet Monitor", - "contributor": "", - "description": "This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event." -} diff --git a/.changes/next-release/feature-AmazonPolly-af7d385.json b/.changes/next-release/feature-AmazonPolly-af7d385.json deleted file mode 100644 index 375c126b7ca5..000000000000 --- a/.changes/next-release/feature-AmazonPolly-af7d385.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Polly", - "contributor": "", - "description": "Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only." -} diff --git a/.changes/next-release/feature-AmazonRelationalDatabaseService-4a34c80.json b/.changes/next-release/feature-AmazonRelationalDatabaseService-4a34c80.json deleted file mode 100644 index b430d8ddcdb7..000000000000 --- a/.changes/next-release/feature-AmazonRelationalDatabaseService-4a34c80.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Relational Database Service", - "contributor": "", - "description": "Added support for deleted clusters PiTR." 
-} diff --git a/.changes/next-release/feature-AmazonSageMakerService-c9291c9.json b/.changes/next-release/feature-AmazonSageMakerService-c9291c9.json deleted file mode 100644 index b16348292120..000000000000 --- a/.changes/next-release/feature-AmazonSageMakerService-c9291c9.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon SageMaker Service", - "contributor": "", - "description": "Add Stairs TrafficPattern and FlatInvocations to RecommendationJobStoppingConditions" -} diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d32ea3d3f99..140390d38bd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,36 @@ +# __2.20.117__ __2023-08-01__ +## __AWS Batch__ + - ### Features + - This release adds support for price capacity optimized allocation strategy for Spot Instances. + +## __AWS Database Migration Service__ + - ### Features + - Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version. + +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon CloudWatch Internet Monitor__ + - ### Features + - This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event. + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only. + +## __Amazon Relational Database Service__ + - ### Features + - Added support for deleted clusters PiTR. 
+ +## __Amazon SageMaker Service__ + - ### Features + - Add Stairs TrafficPattern and FlatInvocations to RecommendationJobStoppingConditions + # __2.20.116__ __2023-07-31__ ## __AWS Amplify UI Builder__ - ### Features diff --git a/README.md b/README.md index cf2ab85b338a..3ab1dbc5a303 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same verAdds the resource mapping for the draft application version. You can also update an existing resource mapping to a new physical resource.
" }, + "BatchUpdateRecommendationStatus":{ + "name":"BatchUpdateRecommendationStatus", + "http":{ + "method":"POST", + "requestUri":"/batch-update-recommendation-status", + "responseCode":200 + }, + "input":{"shape":"BatchUpdateRecommendationStatusRequest"}, + "output":{"shape":"BatchUpdateRecommendationStatusResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"Enables you to include or exclude one or more operational recommendations.
" + }, "CreateApp":{ "name":"CreateApp", "http":{ @@ -49,7 +67,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"Creates an Resilience Hub application. An Resilience Hub application is a collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe an Resilience Hub application, you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate resiliency policy. For more information about the number of resources supported per application, see Service Quotas.
After you create an Resilience Hub application, you publish it so that you can run a resiliency assessment on it. You can then use recommendations from the assessment to improve resiliency by running another assessment, comparing results, and then iterating the process until you achieve your goals for recovery time objective (RTO) and recovery point objective (RPO).
" + "documentation":"Creates an Resilience Hub application. An Resilience Hub application is a collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe a Resilience Hub application, you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate resiliency policy. In addition, you can also add resources that are located on Amazon Elastic Kubernetes Service (Amazon EKS) clusters as optional resources. For more information about the number of resources supported per application, see Service quotas.
After you create an Resilience Hub application, you publish it so that you can run a resiliency assessment on it. You can then use recommendations from the assessment to improve resiliency by running another assessment, comparing results, and then iterating the process until you achieve your goals for recovery time objective (RTO) and recovery point objective (RPO).
" }, "CreateAppVersionAppComponent":{ "name":"CreateAppVersionAppComponent", @@ -438,6 +456,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} @@ -462,6 +481,23 @@ ], "documentation":"Lists the alarm recommendations for an Resilience Hub application.
" }, + "ListAppAssessmentComplianceDrifts":{ + "name":"ListAppAssessmentComplianceDrifts", + "http":{ + "method":"POST", + "requestUri":"/list-app-assessment-compliance-drifts", + "responseCode":200 + }, + "input":{"shape":"ListAppAssessmentComplianceDriftsRequest"}, + "output":{"shape":"ListAppAssessmentComplianceDriftsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"List of compliance drifts that were detected while running an assessment.
" + }, "ListAppAssessments":{ "name":"ListAppAssessments", "http":{ @@ -1003,7 +1039,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Mappings used to map logical resources from the template to physical resources. You can use the mapping type CFN_STACK if the application template uses a logical stack name. Or you can map individual resources by using the mapping type RESOURCE. We recommend using the mapping type CFN_STACK if the application is backed by a CloudFormation stack.
List of sources that are used to map a logical resource from the template to a physical resource. You can use sources such as CloudFormation, Terraform state files, AppRegistry applications, or Amazon EKS.
" } } }, @@ -1055,19 +1091,25 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"The Application Component for the CloudWatch alarm recommendation.
" + "documentation":"Application Component name for the CloudWatch alarm recommendation. This name is saved as the first item in the appComponentNames list.
List of Application Component names for the CloudWatch alarm recommendation.
" }, "description":{ "shape":"EntityDescription", - "documentation":"The description of the recommendation.
" + "documentation":"Description of the alarm recommendation.
" }, "items":{ "shape":"RecommendationItemList", - "documentation":"The list of CloudWatch alarm recommendations.
" + "documentation":"List of CloudWatch alarm recommendations.
" }, "name":{ "shape":"String500", - "documentation":"The name of the alarm recommendation.
" + "documentation":"Name of the alarm recommendation.
" }, "prerequisite":{ "shape":"String500", @@ -1075,15 +1117,15 @@ }, "recommendationId":{ "shape":"Uuid", - "documentation":"The identifier of the alarm recommendation.
" + "documentation":"Identifier of the alarm recommendation.
" }, "referenceId":{ "shape":"SpecReferenceId", - "documentation":"The reference identifier of the alarm recommendation.
" + "documentation":"Reference identifier of the alarm recommendation.
" }, "type":{ "shape":"AlarmType", - "documentation":"The type of alarm recommendation.
" + "documentation":"Type of alarm recommendation.
" } }, "documentation":"Defines a recommendation for a CloudWatch alarm.
" @@ -1118,51 +1160,67 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Assessment execution schedule with 'Daily' or 'Disabled' values.
" + "documentation":"Assessment execution schedule with 'Daily' or 'Disabled' values.
" }, "complianceStatus":{ "shape":"AppComplianceStatusType", - "documentation":"The current status of compliance for the resiliency policy.
" + "documentation":"Current status of compliance for the resiliency policy.
" }, "creationTime":{ "shape":"TimeStamp", - "documentation":"The timestamp for when the app was created.
" + "documentation":"Timestamp for when the app was created.
" }, "description":{ "shape":"EntityDescription", - "documentation":"The optional description for an app.
" + "documentation":"Optional description for an application.
" + }, + "driftStatus":{ + "shape":"AppDriftStatusType", + "documentation":"Indicates if compliance drifts (deviations) were detected while running an assessment for your application.
" + }, + "eventSubscriptions":{ + "shape":"EventSubscriptionList", + "documentation":"The list of events you would like to subscribe and get notification for. Currently, Resilience Hub supports notifications only for Drift detected and Scheduled assessment failure events.
" }, "lastAppComplianceEvaluationTime":{ "shape":"TimeStamp", - "documentation":"The timestamp for the most recent compliance evaluation.
" + "documentation":"Timestamp for the most recent compliance evaluation.
" + }, + "lastDriftEvaluationTime":{ + "shape":"TimeStamp", + "documentation":"Indicates the last time that a drift was evaluated.
" }, "lastResiliencyScoreEvaluationTime":{ "shape":"TimeStamp", - "documentation":"The timestamp for the most recent resiliency score evaluation.
" + "documentation":"Timestamp for the most recent resiliency score evaluation.
" }, "name":{ "shape":"EntityName", - "documentation":"The name for the application.
" + "documentation":"Name for the application.
" + }, + "permissionModel":{ + "shape":"PermissionModel", + "documentation":"Defines the roles and credentials that Resilience Hub would use while creating the application, importing its resources, and running an assessment.
" }, "policyArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The current resiliency score for the application.
" + "documentation":"Current resiliency score for the application.
" }, "status":{ "shape":"AppStatusType", - "documentation":"The status of the application.
" + "documentation":"Status of the application.
" }, "tags":{ "shape":"TagMap", - "documentation":"The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + "documentation":"Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" } }, "documentation":"Defines an Resilience Hub application.
" @@ -1177,39 +1235,43 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The version of the application.
" + "documentation":"Version of an application.
" }, "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The name of the assessment.
" + "documentation":"Name of the assessment.
" }, "assessmentStatus":{ "shape":"AssessmentStatus", - "documentation":"The current status of the assessment for the resiliency policy.
" + "documentation":"Current status of the assessment for the resiliency policy.
" }, "compliance":{ "shape":"AssessmentCompliance", - "documentation":"The application compliance against the resiliency policy.
" + "documentation":"Application compliance against the resiliency policy.
" }, "complianceStatus":{ "shape":"ComplianceStatus", - "documentation":"The current status of the compliance for the resiliency policy.
" + "documentation":"Current status of the compliance for the resiliency policy.
" }, "cost":{ "shape":"Cost", - "documentation":"The cost for the application.
" + "documentation":"Cost for the application.
" + }, + "driftStatus":{ + "shape":"DriftStatus", + "documentation":"Indicates if compliance drifts (deviations) were detected while running an assessment for your application.
" }, "endTime":{ "shape":"TimeStamp", - "documentation":"The end time for the action.
" + "documentation":"End time for the action.
" }, "invoker":{ "shape":"AssessmentInvoker", @@ -1221,11 +1283,11 @@ }, "policy":{ "shape":"ResiliencyPolicy", - "documentation":"The resiliency policy.
" + "documentation":"Resiliency policy of an application.
" }, "resiliencyScore":{ "shape":"ResiliencyScore", - "documentation":"The current resiliency score for the application.
" + "documentation":"Current resiliency score for an application.
" }, "resourceErrorsDetails":{ "shape":"ResourceErrorsDetails", @@ -1233,11 +1295,15 @@ }, "startTime":{ "shape":"TimeStamp", - "documentation":"The starting time for the action.
" + "documentation":"Starting time for the action.
" }, "tags":{ "shape":"TagMap", - "documentation":"The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + "documentation":"Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + }, + "versionName":{ + "shape":"EntityVersion", + "documentation":"Version name of the published application.
" } }, "documentation":"Defines an application assessment.
" @@ -1258,51 +1324,59 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The version of the application.
" + "documentation":"Version of an application.
" }, "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The name of the assessment.
" + "documentation":"Name of the assessment.
" }, "assessmentStatus":{ "shape":"AssessmentStatus", - "documentation":"The current status of the assessment for the resiliency policy.
" + "documentation":"Current status of the assessment for the resiliency policy.
" }, "complianceStatus":{ "shape":"ComplianceStatus", - "documentation":"The current status of compliance for the resiliency policy.
" + "documentation":"TCurrent status of compliance for the resiliency policy.
" }, "cost":{ "shape":"Cost", - "documentation":"The cost for the application.
" + "documentation":"Cost for an application.
" + }, + "driftStatus":{ + "shape":"DriftStatus", + "documentation":"Indicates if compliance drifts (deviations) were detected while running an assessment for your application.
" }, "endTime":{ "shape":"TimeStamp", - "documentation":"The end time for the action.
" + "documentation":"End time for the action.
" }, "invoker":{ "shape":"AssessmentInvoker", - "documentation":"The entity that invoked the assessment.
" + "documentation":"Entity that invoked the assessment.
" }, "message":{ "shape":"String500", - "documentation":"The message from the assessment run.
" + "documentation":"Message from the assessment run.
" }, "resiliencyScore":{ "shape":"Double", - "documentation":"The current resiliency score for the application.
" + "documentation":"Current resiliency score for the application.
" }, "startTime":{ "shape":"TimeStamp", - "documentation":"The starting time for the action.
" + "documentation":"Starting time for the action.
" + }, + "versionName":{ + "shape":"EntityVersion", + "documentation":"Name of an application version.
" } }, "documentation":"Defines an application assessment summary.
" @@ -1333,11 +1407,11 @@ }, "id":{ "shape":"String255", - "documentation":"Unique identifier of the Application Component.
" + "documentation":"Identifier of the Application Component.
" }, "name":{ "shape":"String255", - "documentation":"The name of the Application Component.
" + "documentation":"Name of the Application Component.
" }, "type":{ "shape":"String255", @@ -1351,7 +1425,7 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"The name of the Application Component.
" + "documentation":"Name of the Application Component.
" }, "compliance":{ "shape":"AssessmentCompliance", @@ -1371,7 +1445,7 @@ }, "status":{ "shape":"ComplianceStatus", - "documentation":"The status of the action.
" + "documentation":"Status of the action.
" } }, "documentation":"Defines the compliance of an Application Component against the resiliency policy.
" @@ -1384,6 +1458,14 @@ "type":"list", "member":{"shape":"String255"} }, + "AppDriftStatusType":{ + "type":"string", + "enum":[ + "NotChecked", + "NotDetected", + "Detected" + ] + }, "AppInputSource":{ "type":"structure", "required":["importType"], @@ -1436,7 +1518,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The optional description for an app.
" }, + "driftStatus":{ + "shape":"AppDriftStatusType", + "documentation":"Indicates if compliance drifts (deviations) were detected while running an assessment for your application.
" + }, "name":{ "shape":"EntityName", "documentation":"The name of the application.
" @@ -1464,7 +1550,7 @@ }, "status":{ "shape":"AppStatusType", - "documentation":"The status of the application.
" + "documentation":"Status of the application.
" } }, "documentation":"Defines an application summary.
" @@ -1489,10 +1575,22 @@ "members":{ "appVersion":{ "shape":"EntityVersion", - "documentation":"The version of the application.
" + "documentation":"Version of an application.
" + }, + "creationTime":{ + "shape":"TimeStamp", + "documentation":"Creation time of the application version.
" + }, + "identifier":{ + "shape":"LongOptional", + "documentation":"Identifier of the application version.
" + }, + "versionName":{ + "shape":"EntityVersion", + "documentation":"Name of the application version.
" } }, - "documentation":"The version of the application.
" + "documentation":"Version of an application.
" }, "Arn":{ "type":"string", @@ -1533,6 +1631,103 @@ "type":"string", "pattern":"^[a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]$" }, + "BatchUpdateRecommendationStatusFailedEntries":{ + "type":"list", + "member":{"shape":"BatchUpdateRecommendationStatusFailedEntry"} + }, + "BatchUpdateRecommendationStatusFailedEntry":{ + "type":"structure", + "required":[ + "entryId", + "errorMessage" + ], + "members":{ + "entryId":{ + "shape":"String255", + "documentation":"An identifier of an entry in this batch that is used to communicate the result.
The entryIds of a batch request need to be unique within a request.
Indicates the error that occurred while excluding an operational recommendation.
" + } + }, + "documentation":"List of operational recommendations that did not get included or excluded.
" + }, + "BatchUpdateRecommendationStatusRequest":{ + "type":"structure", + "required":[ + "appArn", + "requestEntries" + ], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Defines the list of operational recommendations that need to be included or excluded.
" + } + } + }, + "BatchUpdateRecommendationStatusResponse":{ + "type":"structure", + "required":[ + "appArn", + "failedEntries", + "successfulEntries" + ], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
A list of items with error details about each item, which could not be included or excluded.
" + }, + "successfulEntries":{ + "shape":"BatchUpdateRecommendationStatusSuccessfulEntries", + "documentation":"A list of items that were included or excluded.
" + } + } + }, + "BatchUpdateRecommendationStatusSuccessfulEntries":{ + "type":"list", + "member":{"shape":"BatchUpdateRecommendationStatusSuccessfulEntry"} + }, + "BatchUpdateRecommendationStatusSuccessfulEntry":{ + "type":"structure", + "required":[ + "entryId", + "excluded", + "item", + "referenceId" + ], + "members":{ + "entryId":{ + "shape":"String255", + "documentation":"An identifier for an entry in this batch that is used to communicate the result.
The entryIds of a batch request need to be unique within a request.
Indicates the reason for excluding an operational recommendation.
" + }, + "excluded":{ + "shape":"BooleanOptional", + "documentation":"Indicates if the operational recommendation was successfully excluded.
" + }, + "item":{ + "shape":"UpdateRecommendationStatusItem", + "documentation":"The operational recommendation item.
" + }, + "referenceId":{ + "shape":"SpecReferenceId", + "documentation":"Reference identifier of the operational recommendation.
" + } + }, + "documentation":"List of operational recommendations that were successfully included or excluded.
" + }, "BooleanOptional":{ "type":"boolean", "box":true @@ -1543,6 +1738,56 @@ "min":1, "pattern":"^[A-za-z0-9_.-]{0,63}$" }, + "ComplianceDrift":{ + "type":"structure", + "members":{ + "actualReferenceId":{ + "shape":"String255", + "documentation":"Assessment identifier that is associated with this drift item.
" + }, + "actualValue":{ + "shape":"AssessmentCompliance", + "documentation":"Actual compliance value of the entity.
" + }, + "appId":{ + "shape":"String255", + "documentation":"Identifier of your application.
" + }, + "appVersion":{ + "shape":"String255", + "documentation":"Published version of your application on which drift was detected.
" + }, + "diffType":{ + "shape":"DifferenceType", + "documentation":"Difference type between actual and expected recovery point objective (RPO) and recovery time objective (RTO) values. Currently, Resilience Hub supports only NotEqual difference type.
" + }, + "driftType":{ + "shape":"DriftType", + "documentation":"The type of drift detected. Currently, Resilience Hub supports only ApplicationCompliance drift type.
" + }, + "entityId":{ + "shape":"String255", + "documentation":"Identifier of an entity in which drift was detected. For compliance drift, the entity ID can be either application ID or the AppComponent ID.
" + }, + "entityType":{ + "shape":"String255", + "documentation":"The type of entity in which drift was detected. For compliance drifts, Resilience Hub supports AWS::ResilienceHub::AppComponent and AWS::ResilienceHub::Application.
Assessment identifier of a previous assessment of the same application version. Resilience Hub uses the previous assessment (associated with the reference identifier) to compare the compliance with the current assessment to identify drifts.
" + }, + "expectedValue":{ + "shape":"AssessmentCompliance", + "documentation":"The expected compliance value of an entity.
" + } + }, + "documentation":"Indicates the compliance drifts (recovery time objective (RTO) and recovery point objective (RPO)) that were detected for an assessed entity.
" + }, + "ComplianceDriftList":{ + "type":"list", + "member":{"shape":"ComplianceDrift"} + }, "ComplianceStatus":{ "type":"string", "enum":[ @@ -1564,15 +1809,15 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"The name of the Application Component.
" + "documentation":"Name of the Application Component.
" }, "configRecommendations":{ "shape":"ConfigRecommendationList", - "documentation":"The list of recommendations.
" + "documentation":"List of recommendations.
" }, "recommendationStatus":{ "shape":"RecommendationComplianceStatus", - "documentation":"The recommendation status.
" + "documentation":"Status of the recommendation.
" } }, "documentation":"Defines recommendations for an Resilience Hub Application Component, returned as an object. This object contains component names, configuration recommendations, and recommendation statuses.
" @@ -1591,7 +1836,7 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"The name of the Application Component.
" + "documentation":"Name of the Application Component.
" }, "compliance":{ "shape":"AssessmentCompliance", @@ -1623,14 +1868,14 @@ }, "referenceId":{ "shape":"SpecReferenceId", - "documentation":"The reference identifier for the recommendation configuration.
" + "documentation":"Reference identifier for the recommendation configuration.
" }, "suggestedChanges":{ "shape":"SuggestedChangesList", "documentation":"List of the suggested configuration changes.
" } }, - "documentation":"Defines a configuration recommendation.
" + "documentation":"Defines a recommendation configuration.
" }, "ConfigRecommendationList":{ "type":"list", @@ -1716,17 +1961,25 @@ "shape":"EntityDescription", "documentation":"The optional description for an app.
" }, + "eventSubscriptions":{ + "shape":"EventSubscriptionList", + "documentation":"The list of events you would like to subscribe and get notification for. Currently, Resilience Hub supports only Drift detected and Scheduled assessment failure events notification.
" + }, "name":{ "shape":"EntityName", - "documentation":"The name for the application.
" + "documentation":"Name of the application.
" + }, + "permissionModel":{ + "shape":"PermissionModel", + "documentation":"Defines the roles and credentials that Resilience Hub would use while creating the application, importing its resources, and running an assessment.
" }, "policyArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + "documentation":"Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" } } }, @@ -1754,7 +2007,7 @@ }, "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The identifier of the Application Component.
" + "documentation":"Identifier of the Application Component.
" }, "name":{ "shape":"String255", - "documentation":"The name of the Application Component.
" + "documentation":"Name of the Application Component.
" }, "type":{ "shape":"String255", - "documentation":"The type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
" + "documentation":"Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
" } } }, @@ -1784,15 +2037,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The list of Application Components that belong to this resource.
" + "documentation":"List of Application Components that belong to this resource.
" }, "appVersion":{ "shape":"EntityVersion", - "documentation":"The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" } } }, @@ -1812,19 +2065,19 @@ }, "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The list of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
" + "documentation":"List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
" }, "awsAccountId":{ "shape":"CustomerId", - "documentation":"The Amazon Web Services account that owns the physical resource.
" + "documentation":"Amazon Web Services account that owns the physical resource.
" }, "awsRegion":{ "shape":"AwsRegion", - "documentation":"The Amazon Web Services region that owns the physical resource.
" + "documentation":"Amazon Web Services region that owns the physical resource.
" }, "clientToken":{ "shape":"ClientToken", @@ -1833,19 +2086,19 @@ }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"The logical identifier of the resource.
" + "documentation":"Logical identifier of the resource.
" }, "physicalResourceId":{ "shape":"String2048", - "documentation":"The physical identifier of the resource.
" + "documentation":"Physical identifier of the resource.
" }, "resourceName":{ "shape":"EntityName", - "documentation":"The name of the resource.
" + "documentation":"Name of the resource.
" }, "resourceType":{ "shape":"String255", - "documentation":"The type of resource.
" + "documentation":"Type of resource.
" } } }, @@ -1858,11 +2111,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" }, "physicalResource":{ "shape":"PhysicalResource", @@ -1879,7 +2132,7 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + "documentation":"Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" } } }, @@ -1952,7 +2205,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + "documentation":"Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" }, "tier":{ "shape":"ResiliencyPolicyTier", @@ -1993,7 +2246,7 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The name of the input source from where the application resource is imported from.
" + "documentation":"Name of the input source from where the application resource is imported from.
" } } }, @@ -2065,7 +2318,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The identifier of the Application Component.
" + "documentation":"Identifier of the Application Component.
" } } }, @@ -2119,15 +2372,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The list of Application Components that belong to this resource.
" + "documentation":"List of Application Components that belong to this resource.
" }, "appVersion":{ "shape":"EntityVersion", - "documentation":"The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" } } }, @@ -2137,15 +2390,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Web Services account that owns the physical resource.
" + "documentation":"Amazon Web Services account that owns the physical resource.
" }, "awsRegion":{ "shape":"AwsRegion", - "documentation":"The Amazon Web Services region that owns the physical resource.
" + "documentation":"Amazon Web Services region that owns the physical resource.
" }, "clientToken":{ "shape":"ClientToken", @@ -2154,15 +2407,15 @@ }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"The logical identifier of the resource.
" + "documentation":"Logical identifier of the resource.
" }, "physicalResourceId":{ "shape":"String2048", - "documentation":"The physical identifier of the resource.
" + "documentation":"Physical identifier of the resource.
" }, "resourceName":{ "shape":"EntityName", - "documentation":"The name of the resource.
" + "documentation":"Name of the resource.
" } } }, @@ -2175,11 +2428,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" }, "physicalResource":{ "shape":"PhysicalResource", @@ -2215,7 +2468,7 @@ }, "status":{ "shape":"RecommendationTemplateStatus", - "documentation":"The status of the action.
" + "documentation":"Status of the action.
" } } }, @@ -2230,7 +2483,7 @@ }, "policyArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" }, "id":{ "shape":"String255", - "documentation":"The identifier of the Application Component.
" + "documentation":"Identifier of the Application Component.
" } } }, @@ -2315,15 +2568,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The list of Application Components that belong to this resource.
" + "documentation":"List of Application Components that belong to this resource.
" }, "appVersion":{ "shape":"EntityVersion", - "documentation":"The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" } } }, @@ -2336,11 +2589,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" } } }, @@ -2353,31 +2606,31 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" }, "awsAccountId":{ "shape":"CustomerId", - "documentation":"The Amazon Web Services account that owns the physical resource.
" + "documentation":"Amazon Web Services account that owns the physical resource.
" }, "awsRegion":{ "shape":"AwsRegion", - "documentation":"The Amazon Web Services region that owns the physical resource.
" + "documentation":"Amazon Web Services region that owns the physical resource.
" }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"The logical identifier of the resource.
" + "documentation":"Logical identifier of the resource.
" }, "physicalResourceId":{ "shape":"String2048", - "documentation":"The physical identifier of the resource.
" + "documentation":"Physical identifier of the resource.
" }, "resourceName":{ "shape":"EntityName", - "documentation":"The name of the resource.
" + "documentation":"Name of the resource.
" } } }, @@ -2390,11 +2643,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" }, "physicalResource":{ "shape":"PhysicalResource", @@ -2411,7 +2664,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The status of the action.
" + "documentation":"Status of the action.
" } } }, @@ -2467,11 +2720,11 @@ }, "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" } } }, @@ -2484,7 +2737,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
A JSON string that provides information about your application structure. To learn more about the appTemplateBody template, see the sample template provided in the Examples section.
The appTemplateBody JSON string has the following structure:
resources
The list of logical resources that must be included in the Resilience Hub application.
Type: Array
Don't add the resources that you want to exclude.
Each resources array item includes the following fields:
logicalResourceId
The logical identifier of the resource.
Type: Object
Each logicalResourceId object includes the following fields:
identifier
The identifier of the resource.
Type: String
logicalStackName
The name of the CloudFormation stack this resource belongs to.
Type: String
resourceGroupName
The name of the resource group this resource belongs to.
Type: String
terraformSourceName
The name of the Terraform S3 state file this resource belongs to.
Type: String
eksSourceName
The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Type: String
type
The type of resource.
Type: string
name
The name of the resource.
Type: String
additionalInfo
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
appComponents
The list of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
Type: Array
Each appComponents array item includes the following fields:
name
The name of the Application Component.
Type: String
type
The type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
Type: String
resourceNames
The list of included resources that are assigned to the Application Component.
Type: Array of strings
additionalInfo
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
excludedResources
The list of logical resource identifiers to be excluded from the application.
Type: Array
Don't add the resources that you want to include.
Each excludedResources array item includes the following fields:
logicalResourceIds
The logical identifier of the resource.
Type: Object
You can configure only one of the following fields:
logicalStackName
resourceGroupName
terraformSourceName
eksSourceName
Each logicalResourceIds object includes the following fields:
identifier
The identifier of the resource.
Type: String
logicalStackName
The name of the CloudFormation stack this resource belongs to.
Type: String
resourceGroupName
The name of the resource group this resource belongs to.
Type: String
terraformSourceName
The name of the Terraform S3 state file this resource belongs to.
Type: String
eksSourceName
The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Type: String
version
The Resilience Hub application version.
additionalInfo
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
A JSON string that provides information about your application structure. To learn more about the appTemplateBody template, see the sample template provided in the Examples section.
The appTemplateBody JSON string has the following structure:
resources
The list of logical resources that must be included in the Resilience Hub application.
Type: Array
Don't add the resources that you want to exclude.
Each resources array item includes the following fields:
logicalResourceId
Logical identifier of the resource.
Type: Object
Each logicalResourceId object includes the following fields:
identifier
Identifier of the resource.
Type: String
logicalStackName
The name of the CloudFormation stack this resource belongs to.
Type: String
resourceGroupName
The name of the resource group this resource belongs to.
Type: String
terraformSourceName
The name of the Terraform S3 state file this resource belongs to.
Type: String
eksSourceName
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Type: String
type
The type of resource.
Type: string
name
The name of the resource.
Type: String
additionalInfo
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
appComponents
List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
Type: Array
Each appComponents array item includes the following fields:
name
Name of the Application Component.
Type: String
type
Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
Type: String
resourceNames
The list of included resources that are assigned to the Application Component.
Type: Array of strings
additionalInfo
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
excludedResources
The list of logical resource identifiers to be excluded from the application.
Type: Array
Don't add the resources that you want to include.
Each excludedResources array item includes the following fields:
logicalResourceIds
Logical identifier of the resource.
Type: Object
You can configure only one of the following fields:
logicalStackName
resourceGroupName
terraformSourceName
eksSourceName
Each logicalResourceIds object includes the following fields:
identifier
Identifier of the resource.
Type: String
logicalStackName
The name of the CloudFormation stack this resource belongs to.
Type: String
resourceGroupName
The name of the resource group this resource belongs to.
Type: String
terraformSourceName
The name of the Terraform S3 state file this resource belongs to.
Type: String
eksSourceName
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Type: String
version
Resilience Hub application version.
additionalInfo
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The status of the action.
" + "documentation":"Status of the action.
" }, "statusChangeTime":{ "shape":"TimeStamp", @@ -2561,7 +2814,7 @@ "members":{ "policyArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The RPO reference identifier.
" + "documentation":"Reference identifier of the RPO .
" }, "rtoDescription":{ "shape":"String500", @@ -2617,7 +2874,7 @@ }, "rtoReferenceId":{ "shape":"String500", - "documentation":"The RTO reference identifier.
" + "documentation":"Reference identifier of the RTO.
" } }, "documentation":"Defines the compliance against the resiliency policy for a disruption.
" @@ -2647,6 +2904,18 @@ "min":1 }, "Double":{"type":"double"}, + "DriftStatus":{ + "type":"string", + "enum":[ + "NotChecked", + "NotDetected", + "Detected" + ] + }, + "DriftType":{ + "type":"string", + "enum":["ApplicationCompliance"] + }, "EksNamespace":{ "type":"string", "max":63, @@ -2666,7 +2935,7 @@ "members":{ "eksClusterArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Amazon Elastic Kubernetes Service cluster. The format for this ARN is: arn:aws:eks:region:account-id:cluster/cluster-name. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Amazon Elastic Kubernetes Service cluster. The format for this ARN is: arn:aws:eks:region:account-id:cluster/cluster-name. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Amazon Elastic Kubernetes Service cluster. The format for this ARN is: arn:aws:eks:region:account-id:cluster/cluster-name. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Amazon Elastic Kubernetes Service cluster. The format for this ARN is: arn:aws:eks:region:account-id:cluster/cluster-name. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The type of event you would like to subscribe and get notification for. Currently, Resilience Hub supports notifications only for Drift detected (DriftDetected) and Scheduled assessment failure (ScheduledAssessmentFailure) events.
Unique name to identify an event subscription.
" + }, + "snsTopicArn":{ + "shape":"Arn", + "documentation":"Amazon Resource Name (ARN) of the Amazon Simple Notification Service topic. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Indicates an event you would like to subscribe and get notification for. Currently, Resilience Hub supports notifications only for Drift detected and Scheduled assessment failure events.
" + }, + "EventSubscriptionList":{ + "type":"list", + "member":{"shape":"EventSubscription"}, + "max":10, + "min":0 + }, + "EventType":{ + "type":"string", + "enum":[ + "ScheduledAssessmentFailure", + "DriftDetected" + ] + }, + "ExcludeRecommendationReason":{ + "type":"string", + "enum":[ + "AlreadyImplemented", + "NotRelevant", + "ComplexityOfImplementation" + ] + }, "FailurePolicy":{ "type":"structure", "required":[ @@ -2760,13 +3072,27 @@ "NoRecoveryPlan" ] }, + "IamRoleArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):iam::[0-9]{12}:role/(([^/][!-~]+/){1,511})?[A-Za-z0-9_+=,.@-]{1,64}$" + }, + "IamRoleArnList":{ + "type":"list", + "member":{"shape":"IamRoleArn"}, + "max":10, + "min":0 + }, + "IamRoleName":{ + "type":"string", + "pattern":"^([^/]([!-~]+/){1,511})?[A-Za-z0-9_+=,.@-]{1,64}$" + }, "ImportResourcesToDraftAppVersionRequest":{ "type":"structure", "required":["appArn"], "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The status of the action.
" + "documentation":"Status of the action.
" }, "terraformSources":{ "shape":"TerraformSourceList", @@ -2837,11 +3163,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" + } + } + }, + "ListAppAssessmentComplianceDriftsRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{ + "shape":"Arn", + "documentation":"Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Indicates the maximum number of applications requested.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"Indicates the unique token number of the next application to be checked for compliance and regulatory requirements from the list of applications.
" + } + } + }, + "ListAppAssessmentComplianceDriftsResponse":{ + "type":"structure", + "required":["complianceDrifts"], + "members":{ + "complianceDrifts":{ + "shape":"ComplianceDriftList", + "documentation":"Indicates compliance drifts (recovery time objective (RTO) and recovery point objective (RPO)) detected for an assessed entity.
" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"Token number of the next application to be checked for compliance and regulatory requirements from the list of applications.
" } } }, @@ -2868,7 +3226,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" } } }, @@ -2936,11 +3294,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" } } }, @@ -2968,11 +3326,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" } } }, @@ -3003,11 +3361,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" }, "maxResults":{ "shape":"MaxResults", @@ -3029,7 +3387,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" } } }, @@ -3042,11 +3400,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The version of the Application Component.
" + "documentation":"Version of the Application Component.
" }, "maxResults":{ "shape":"MaxResults", @@ -3067,7 +3425,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" }, "nextToken":{ "shape":"NextToken", - "documentation":"The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" } } }, @@ -3092,7 +3450,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" }, "resourceMappings":{ "shape":"ResourceMappingList", @@ -3131,7 +3489,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" }, "physicalResources":{ "shape":"PhysicalResourceList", @@ -3178,15 +3536,23 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Upper limit of the time range to filter the application versions.
" }, "maxResults":{ "shape":"MaxResults", - "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Null, or the token from a previous call to get the next set of results.
" + }, + "startTime":{ + "shape":"TimeStamp", + "documentation":"Lower limit of the time range to filter the application versions.
" } } }, @@ -3200,7 +3566,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" } } }, @@ -3209,13 +3575,13 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" } } }, @@ -3253,13 +3619,13 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The status of the action.
", + "documentation":"Status of the action.
", "location":"querystring", "locationName":"status" } @@ -3300,7 +3666,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" }, "recommendationTemplates":{ "shape":"RecommendationTemplateList", @@ -3313,7 +3679,7 @@ "members":{ "maxResults":{ "shape":"MaxResults", - "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" }, "resiliencyPolicies":{ "shape":"ResiliencyPolicies", @@ -3351,11 +3717,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" }, "sopRecommendations":{ "shape":"SopRecommendationList", @@ -3382,7 +3748,7 @@ "members":{ "maxResults":{ "shape":"MaxResults", - "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" }, "resiliencyPolicies":{ "shape":"ResiliencyPolicies", @@ -3425,7 +3791,7 @@ "members":{ "tags":{ "shape":"TagMap", - "documentation":"The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + "documentation":"Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" } } }, @@ -3435,11 +3801,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" }, "testRecommendations":{ "shape":"TestRecommendationList", @@ -3470,7 +3836,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.
The token for the next set of results, or null if there are no more results.
" + "documentation":"Token for the next set of results, or null if there are no more results.
" }, "resolutionId":{ "shape":"String255", @@ -3517,11 +3883,11 @@ "members":{ "eksSourceName":{ "shape":"String255", - "documentation":"The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
The identifier of the resource.
" + "documentation":"Identifier of the resource.
" }, "logicalStackName":{ "shape":"String255", @@ -3538,6 +3904,10 @@ }, "documentation":"Defines a logical resource identifier.
" }, + "LongOptional":{ + "type":"long", + "box":true + }, "MaxResults":{ "type":"integer", "box":true, @@ -3548,6 +3918,32 @@ "type":"string", "pattern":"^\\S{1,2000}$" }, + "PermissionModel":{ + "type":"structure", + "required":["type"], + "members":{ + "crossAccountRoleArns":{ + "shape":"IamRoleArnList", + "documentation":"Defines a list of role Amazon Resource Names (ARNs) to be used in other accounts. These ARNs are used for querying purposes while importing resources and assessing your application.
These ARNs are required only when your resources are in other accounts and you have a different role name in these accounts. Otherwise, the invoker role name will be used in the other accounts.
These roles must have a trust policy with iam:AssumeRole permission to the invoker role in the primary account.
Existing Amazon Web Services IAM role name in the primary Amazon Web Services account that will be assumed by Resilience Hub Service Principal to obtain read-only access to your application resources while running an assessment.
You must have iam:passRole permission for this role while creating or updating the application.
Defines how Resilience Hub scans your resources. It can scan for the resources by using a pre-existing role in your Amazon Web Services account, or by using the credentials of the current IAM user.
" + } + }, + "documentation":"Defines the roles and credentials that Resilience Hub would use while creating the application, importing its resources, and running an assessment.
" + }, + "PermissionModelType":{ + "type":"string", + "enum":[ + "LegacyIAMUser", + "RoleBased" + ] + }, "PhysicalIdentifierType":{ "type":"string", "enum":[ @@ -3577,15 +3973,15 @@ }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"The logical identifier of the resource.
" + "documentation":"Logical identifier of the resource.
" }, "parentResourceName":{ "shape":"EntityName", - "documentation":"The name of the parent resource.
" + "documentation":"Name of the parent resource.
" }, "physicalResourceId":{ "shape":"PhysicalResourceId", - "documentation":"The physical identifier of the resource.
" + "documentation":"Identifier of the physical resource.
" }, "resourceName":{ "shape":"EntityName", @@ -3597,7 +3993,7 @@ }, "sourceType":{ "shape":"ResourceSourceType", - "documentation":"The type of input source.
" + "documentation":"Type of input source.
" } }, "documentation":"Defines a physical resource. A physical resource is a resource that exists in your account. It can be identified using an Amazon Resource Name (ARN) or an Resilience Hub-native identifier.
" @@ -3619,11 +4015,11 @@ }, "identifier":{ "shape":"String255", - "documentation":"The identifier of the physical resource.
" + "documentation":"Identifier of the physical resource.
" }, "type":{ "shape":"PhysicalIdentifierType", - "documentation":"Specifies the type of physical resource identifier.
The resource identifier is an Amazon Resource Name (ARN) .
The resource identifier is an Resilience Hub-native identifier.
Specifies the type of physical resource identifier.
The resource identifier is an Amazon Resource Name (ARN) and it can identify the following list of resources:
AWS::ECS::Service
AWS::EFS::FileSystem
AWS::ElasticLoadBalancingV2::LoadBalancer
AWS::Lambda::Function
AWS::SNS::Topic
The resource identifier is an Resilience Hub-native identifier and it can identify the following list of resources:
AWS::ApiGateway::RestApi
AWS::ApiGatewayV2::Api
AWS::AutoScaling::AutoScalingGroup
AWS::DocDB::DBCluster
AWS::DocDB::DBGlobalCluster
AWS::DocDB::DBInstance
AWS::DynamoDB::GlobalTable
AWS::DynamoDB::Table
AWS::EC2::EC2Fleet
AWS::EC2::Instance
AWS::EC2::NatGateway
AWS::EC2::Volume
AWS::ElasticLoadBalancing::LoadBalancer
AWS::RDS::DBCluster
AWS::RDS::DBInstance
AWS::RDS::GlobalCluster
AWS::Route53::RecordSet
AWS::S3::Bucket
AWS::SQS::Queue
Defines a physical resource identifier.
" @@ -3638,7 +4034,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Name of the application version.
" } } }, @@ -3648,11 +4048,19 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The version of the application.
" + }, + "identifier":{ + "shape":"LongOptional", + "documentation":"Identifier of the application version.
" + }, + "versionName":{ + "shape":"EntityVersion", + "documentation":"Name of the application version.
" } } }, @@ -3665,11 +4073,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
A JSON string that provides information about your application structure. To learn more about the appTemplateBody template, see the sample template provided in the Examples section.
The appTemplateBody JSON string has the following structure:
resources
The list of logical resources that must be included in the Resilience Hub application.
Type: Array
Don't add the resources that you want to exclude.
Each resources array item includes the following fields:
logicalResourceId
The logical identifier of the resource.
Type: Object
Each logicalResourceId object includes the following fields:
identifier
The identifier of the resource.
Type: String
logicalStackName
The name of the CloudFormation stack this resource belongs to.
Type: String
resourceGroupName
The name of the resource group this resource belongs to.
Type: String
terraformSourceName
The name of the Terraform S3 state file this resource belongs to.
Type: String
eksSourceName
The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Type: String
type
The type of resource.
Type: string
name
The name of the resource.
Type: String
additionalInfo
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
appComponents
The list of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
Type: Array
Each appComponents array item includes the following fields:
name
The name of the Application Component.
Type: String
type
The type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
Type: String
resourceNames
The list of included resources that are assigned to the Application Component.
Type: Array of strings
additionalInfo
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
excludedResources
The list of logical resource identifiers to be excluded from the application.
Type: Array
Don't add the resources that you want to include.
Each excludedResources array item includes the following fields:
logicalResourceIds
The logical identifier of the resource.
Type: Object
You can configure only one of the following fields:
logicalStackName
resourceGroupName
terraformSourceName
eksSourceName
Each logicalResourceIds object includes the following fields:
identifier
The identifier of the resource.
Type: String
logicalStackName
The name of the CloudFormation stack this resource belongs to.
Type: String
resourceGroupName
The name of the resource group this resource belongs to.
Type: String
terraformSourceName
The name of the Terraform S3 state file this resource belongs to.
Type: String
eksSourceName
The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Type: String
version
The Resilience Hub application version.
additionalInfo
Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
A JSON string that provides information about your application structure. To learn more about the appTemplateBody template, see the sample template provided in the Examples section.
The appTemplateBody JSON string has the following structure:
resources
The list of logical resources that must be included in the Resilience Hub application.
Type: Array
Don't add the resources that you want to exclude.
Each resources array item includes the following fields:
logicalResourceId
Logical identifier of the resource.
Type: Object
Each logicalResourceId object includes the following fields:
identifier
Identifier of the resource.
Type: String
logicalStackName
The name of the CloudFormation stack this resource belongs to.
Type: String
resourceGroupName
The name of the resource group this resource belongs to.
Type: String
terraformSourceName
The name of the Terraform S3 state file this resource belongs to.
Type: String
eksSourceName
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Type: String
type
The type of resource.
Type: string
name
The name of the resource.
Type: String
additionalInfo
Additional configuration parameters for a Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
appComponents
List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
Type: Array
Each appComponents array item includes the following fields:
name
Name of the Application Component.
Type: String
type
Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
Type: String
resourceNames
The list of included resources that are assigned to the Application Component.
Type: Array of strings
additionalInfo
Additional configuration parameters for a Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
excludedResources
The list of logical resource identifiers to be excluded from the application.
Type: Array
Don't add the resources that you want to include.
Each excludedResources array item includes the following fields:
logicalResourceIds
Logical identifier of the resource.
Type: Object
You can configure only one of the following fields:
logicalStackName
resourceGroupName
terraformSourceName
eksSourceName
Each logicalResourceIds object includes the following fields:
identifier
Identifier of the resource.
Type: String
logicalStackName
The name of the CloudFormation stack this resource belongs to.
Type: String
resourceGroupName
The name of the resource group this resource belongs to.
Type: String
terraformSourceName
The name of the Terraform S3 state file this resource belongs to.
Type: String
eksSourceName
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Type: String
version
Resilience Hub application version.
additionalInfo
Additional configuration parameters for a Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.
Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.
Key: \"failover-regions\"
Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Specifies if the recommendation has already been implemented.
" }, + "excludeReason":{ + "shape":"ExcludeRecommendationReason", + "documentation":"Indicates the reason for excluding an operational recommendation.
" + }, + "excluded":{ + "shape":"BooleanOptional", + "documentation":"Indicates if an operational recommendation item is excluded.
" + }, "resourceId":{ "shape":"String500", - "documentation":"The resource identifier.
" + "documentation":"Identifier of the resource.
" }, "targetAccountId":{ "shape":"CustomerId", - "documentation":"The target account identifier.
" + "documentation":"Identifier of the target account.
" }, "targetRegion":{ "shape":"AwsRegion", @@ -3771,11 +4187,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The format of the recommendation template.
The template is CloudFormation JSON.
The template is CloudFormation YAML.
Format of the recommendation template.
The template is CloudFormation JSON.
The template is CloudFormation YAML.
The message for the recommendation template.
" + "documentation":"Message for the recommendation template.
" }, "name":{ "shape":"EntityName", - "documentation":"The name for the recommendation template.
" + "documentation":"Name for the recommendation template.
" }, "needsReplacements":{ "shape":"BooleanOptional", @@ -3803,7 +4219,7 @@ }, "recommendationTemplateArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) for the recommendation template.
" + "documentation":"Amazon Resource Name (ARN) for the recommendation template.
" }, "recommendationTypes":{ "shape":"RenderRecommendationTypeList", @@ -3815,11 +4231,11 @@ }, "status":{ "shape":"RecommendationTemplateStatus", - "documentation":"The status of the action.
" + "documentation":"Status of the action.
" }, "tags":{ "shape":"TagMap", - "documentation":"The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + "documentation":"Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" }, "templatesLocation":{ "shape":"S3Location", @@ -3853,7 +4269,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + "documentation":"Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" }, "tier":{ "shape":"ResiliencyPolicyTier", @@ -3961,7 +4377,8 @@ "Critical", "Important", "CoreServices", - "NonCritical" + "NonCritical", + "NotApplicable" ] }, "ResiliencyScore":{ @@ -3991,7 +4408,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The status of the action.
" + "documentation":"Status of the action.
" } } }, @@ -4031,11 +4448,11 @@ "members":{ "logicalResourceId":{ "shape":"String255", - "documentation":"This is the identifier of the resource.
" + "documentation":"Identifier of the logical resource.
" }, "physicalResourceId":{ "shape":"String255", - "documentation":"This is the identifier of the physical resource.
" + "documentation":"Identifier of the physical resource.
" }, "reason":{ "shape":"ErrorMessage", @@ -4095,7 +4512,7 @@ }, "eksSourceName":{ "shape":"String255", - "documentation":"The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.
This parameter accepts values in \"eks-cluster/namespace\" format.
The identifier of this resource.
" + "documentation":"Identifier of the physical resource.
" }, "resourceGroupName":{ "shape":"EntityName", - "documentation":"The name of the resource group this resource is mapped to.
" + "documentation":"Name of the resource group that the resource is mapped to.
" }, "resourceName":{ "shape":"EntityName", - "documentation":"The name of the resource this resource is mapped to.
" + "documentation":"Name of the resource that the resource is mapped to.
" }, "terraformSourceName":{ "shape":"String255", @@ -4229,11 +4646,11 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"The name of the Application Component.
" + "documentation":"Name of the Application Component.
" }, "description":{ "shape":"String500", - "documentation":"The description of the SOP recommendation.
" + "documentation":"Description of the SOP recommendation.
" }, "items":{ "shape":"RecommendationItemList", @@ -4241,11 +4658,11 @@ }, "name":{ "shape":"DocumentName", - "documentation":"The name of the SOP recommendation.
" + "documentation":"Name of the SOP recommendation.
" }, "prerequisite":{ "shape":"String500", - "documentation":"The prerequisite for the SOP recommendation.
" + "documentation":"Prerequisite for the SOP recommendation.
" }, "recommendationId":{ "shape":"Uuid", @@ -4253,7 +4670,7 @@ }, "referenceId":{ "shape":"SpecReferenceId", - "documentation":"The reference identifier for the SOP recommendation.
" + "documentation":"Reference identifier for the SOP recommendation.
" }, "serviceType":{ "shape":"SopServiceType", @@ -4285,7 +4702,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" + "documentation":"Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.
" } } }, @@ -4378,7 +4795,7 @@ "members":{ "resourceArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the resource.
", + "documentation":"Amazon Resource Name (ARN) of the resource.
", "location":"uri", "locationName":"resourceArn" }, @@ -4427,7 +4844,7 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"The name of the Application Component.
" + "documentation":"Name of the Application Component.
" }, "dependsOnAlarms":{ "shape":"AlarmReferenceIdList", @@ -4435,11 +4852,11 @@ }, "description":{ "shape":"String500", - "documentation":"The description for the test recommendation.
" + "documentation":"Description for the test recommendation.
" }, "intent":{ "shape":"EntityDescription", - "documentation":"The intent of the test recommendation.
" + "documentation":"Intent of the test recommendation.
" }, "items":{ "shape":"RecommendationItemList", @@ -4447,11 +4864,11 @@ }, "name":{ "shape":"DocumentName", - "documentation":"The name of the test recommendation.
" + "documentation":"Name of the test recommendation.
" }, "prerequisite":{ "shape":"String500", - "documentation":"The prerequisite of the test recommendation.
" + "documentation":"Prerequisite of the test recommendation.
" }, "recommendationId":{ "shape":"Uuid", @@ -4459,15 +4876,15 @@ }, "referenceId":{ "shape":"SpecReferenceId", - "documentation":"The reference identifier for the test recommendation.
" + "documentation":"Reference identifier for the test recommendation.
" }, "risk":{ "shape":"TestRisk", - "documentation":"The level of risk for this test recommendation.
" + "documentation":"Level of risk for this test recommendation.
" }, "type":{ "shape":"TestType", - "documentation":"The type of test recommendation.
" + "documentation":"Type of test recommendation.
" } }, "documentation":"Defines a test recommendation.
" @@ -4520,11 +4937,11 @@ "members":{ "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"The logical resource identifier for the unsupported resource.
" + "documentation":"Logical resource identifier for the unsupported resource.
" }, "physicalResourceId":{ "shape":"PhysicalResourceId", - "documentation":"The physical resource identifier for the unsupported resource.
" + "documentation":"Physical resource identifier for the unsupported resource.
" }, "resourceType":{ "shape":"String255", @@ -4550,7 +4967,7 @@ "members":{ "resourceArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the resource.
", + "documentation":"Amazon Resource Name (ARN) of the resource.
", "location":"uri", "locationName":"resourceArn" }, @@ -4573,7 +4990,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The optional description for an app.
" }, + "eventSubscriptions":{ + "shape":"EventSubscriptionList", + "documentation":"The list of events you would like to subscribe and get notification for. Currently, Resilience Hub supports notifications only for Drift detected and Scheduled assessment failure events.
" + }, + "permissionModel":{ + "shape":"PermissionModel", + "documentation":"Defines the roles and credentials that Resilience Hub would use while creating an application, importing its resources, and running an assessment.
" + }, "policyArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The identifier of the Application Component.
" + "documentation":"Identifier of the Application Component.
" }, "name":{ "shape":"String255", - "documentation":"The name of the Application Component.
" + "documentation":"Name of the Application Component.
" }, "type":{ "shape":"String255", - "documentation":"The type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
" + "documentation":"Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.
" } } }, @@ -4641,15 +5066,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The list of Application Components that belong to this resource.
" + "documentation":"List of Application Components that belong to this resource.
" }, "appVersion":{ "shape":"EntityVersion", - "documentation":"The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" } } }, @@ -4663,7 +5088,7 @@ }, "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The list of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
" + "documentation":"List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.
" }, "awsAccountId":{ "shape":"CustomerId", - "documentation":"The Amazon Web Services account that owns the physical resource.
" + "documentation":"Amazon Web Services account that owns the physical resource.
" }, "awsRegion":{ "shape":"AwsRegion", - "documentation":"The Amazon Web Services region that owns the physical resource.
" + "documentation":"Amazon Web Services region that owns the physical resource.
" }, "excluded":{ "shape":"BooleanOptional", @@ -4697,19 +5122,19 @@ }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"The logical identifier of the resource.
" + "documentation":"Logical identifier of the resource.
" }, "physicalResourceId":{ "shape":"String2048", - "documentation":"The physical identifier of the resource.
" + "documentation":"Physical identifier of the resource.
" }, "resourceName":{ "shape":"EntityName", - "documentation":"The name of the resource.
" + "documentation":"Name of the resource.
" }, "resourceType":{ "shape":"String255", - "documentation":"The type of resource.
" + "documentation":"Type of resource.
" } } }, @@ -4722,11 +5147,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" }, "physicalResource":{ "shape":"PhysicalResource", @@ -4747,14 +5172,70 @@ }, "appArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
The Resilience Hub application version.
" + "documentation":"Resilience Hub application version.
" } } }, + "UpdateRecommendationStatusItem":{ + "type":"structure", + "members":{ + "resourceId":{ + "shape":"String500", + "documentation":"Resource identifier of the operational recommendation item.
" + }, + "targetAccountId":{ + "shape":"CustomerId", + "documentation":"Identifier of the target Amazon Web Services account.
" + }, + "targetRegion":{ + "shape":"AwsRegion", + "documentation":"Identifier of the target Amazon Web Services Region.
" + } + }, + "documentation":"Defines the operational recommendation item that needs a status update.
" + }, + "UpdateRecommendationStatusRequestEntries":{ + "type":"list", + "member":{"shape":"UpdateRecommendationStatusRequestEntry"}, + "max":50, + "min":1 + }, + "UpdateRecommendationStatusRequestEntry":{ + "type":"structure", + "required":[ + "entryId", + "excluded", + "item", + "referenceId" + ], + "members":{ + "entryId":{ + "shape":"String255", + "documentation":"An identifier for an entry in this batch that is used to communicate the result.
The entryIds of a batch request need to be unique within a request.
Indicates the reason for excluding an operational recommendation.
" + }, + "excluded":{ + "shape":"BooleanOptional", + "documentation":"Indicates if the operational recommendation needs to be excluded. If set to True, the operational recommendation will be excluded.
" + }, + "item":{ + "shape":"UpdateRecommendationStatusItem", + "documentation":"The operational recommendation item.
" + }, + "referenceId":{ + "shape":"SpecReferenceId", + "documentation":"Reference identifier of the operational recommendation item.
" + } + }, + "documentation":"Defines the operational recommendation item that is to be included or excluded.
" + }, "UpdateResiliencyPolicyRequest":{ "type":"structure", "required":["policyArn"], @@ -4769,7 +5250,7 @@ }, "policyArn":{ "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.
Adds additional user attributes to the user pool schema.
" + "documentation":"Adds additional user attributes to the user pool schema.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Adds the specified user to the specified group.
Calling this action requires developer credentials.
" + "documentation":"Adds the specified user to the specified group.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Confirms user registration as an admin without using a confirmation code. Works on any user.
Calling this action requires developer credentials.
" + "documentation":"Confirms user registration as an admin without using a confirmation code. Works on any user.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Creates a new user in the specified user pool.
If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS).
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password.
Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email.
In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password.
AdminCreateUser requires developer credentials.
Creates a new user in the specified user pool.
If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS).
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password.
Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email.
In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Deletes a user as an administrator. Works on any user.
Calling this action requires developer credentials.
" + "documentation":"Deletes a user as an administrator. Works on any user.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Deletes the user attributes in a user pool as an administrator. Works on any user.
Calling this action requires developer credentials.
" + "documentation":"Deletes the user attributes in a user pool as an administrator. Works on any user.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Prevents the user from signing in with the specified external (SAML or social) identity provider (IdP). If the user that you want to deactivate is a Amazon Cognito user pools native username + password user, they can't use their password to sign in. If the user to deactivate is a linked external IdP user, any link between that user and an existing user is removed. When the external user signs in again, and the user is no longer attached to the previously linked DestinationUser, the user must create a new user account. See AdminLinkProviderForUser.
This action is enabled only for admin access and requires developer credentials.
The ProviderName must match the value specified when creating an IdP for the pool.
To deactivate a native username + password user, the ProviderName value must be Cognito and the ProviderAttributeName must be Cognito_Subject. The ProviderAttributeValue must be the name that is used in the user pool for the user.
The ProviderAttributeName must always be Cognito_Subject for social IdPs. The ProviderAttributeValue must always be the exact subject that was used when the user was originally linked as a source user.
For de-linking a SAML identity, there are two scenarios. If the linked identity has not yet been used to sign in, the ProviderAttributeName and ProviderAttributeValue must be the same values that were used for the SourceUser when the identities were originally linked using AdminLinkProviderForUser call. (If the linking was done with ProviderAttributeName set to Cognito_Subject, the same applies here). However, if the user has already signed in, the ProviderAttributeName must be Cognito_Subject and ProviderAttributeValue must be the subject of the SAML assertion.
Prevents the user from signing in with the specified external (SAML or social) identity provider (IdP). If the user that you want to deactivate is a Amazon Cognito user pools native username + password user, they can't use their password to sign in. If the user to deactivate is a linked external IdP user, any link between that user and an existing user is removed. When the external user signs in again, and the user is no longer attached to the previously linked DestinationUser, the user must create a new user account. See AdminLinkProviderForUser.
The ProviderName must match the value specified when creating an IdP for the pool.
To deactivate a native username + password user, the ProviderName value must be Cognito and the ProviderAttributeName must be Cognito_Subject. The ProviderAttributeValue must be the name that is used in the user pool for the user.
The ProviderAttributeName must always be Cognito_Subject for social IdPs. The ProviderAttributeValue must always be the exact subject that was used when the user was originally linked as a source user.
For de-linking a SAML identity, there are two scenarios. If the linked identity has not yet been used to sign in, the ProviderAttributeName and ProviderAttributeValue must be the same values that were used for the SourceUser when the identities were originally linked using AdminLinkProviderForUser call. (If the linking was done with ProviderAttributeName set to Cognito_Subject, the same applies here). However, if the user has already signed in, the ProviderAttributeName must be Cognito_Subject and ProviderAttributeValue must be the subject of the SAML assertion.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Deactivates a user and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to GetUser and ListUsers API requests.
You must make this API request with Amazon Web Services credentials that have cognito-idp:AdminDisableUser permissions.
Deactivates a user and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to GetUser and ListUsers API requests.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Enables the specified user as an administrator. Works on any user.
Calling this action requires developer credentials.
" + "documentation":"Enables the specified user as an administrator. Works on any user.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Forgets the device, as an administrator.
Calling this action requires developer credentials.
" + "documentation":"Forgets the device, as an administrator.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Gets the device, as an administrator.
Calling this action requires developer credentials.
" + "documentation":"Gets the device, as an administrator.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Gets the specified user by user name in a user pool as an administrator. Works on any user.
Calling this action requires developer credentials.
" + "documentation":"Gets the specified user by user name in a user pool as an administrator. Works on any user.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Initiates the authentication flow, as an administrator.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Calling this action requires developer credentials.
" + "documentation":"Initiates the authentication flow, as an administrator.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Links an existing user account in a user pool (DestinationUser) to an identity from an external IdP (SourceUser) based on a specified attribute name and value from the external IdP. This allows you to create a link from the existing user account to an external federated user identity that has not yet been used to sign in. You can then use the federated user identity to sign in as the existing user account.
For example, if there is an existing user with a username and password, this API links that user to a federated user identity. When the user signs in with a federated user identity, they sign in as the existing user account.
The maximum number of federated identities linked to a user is five.
Because this API allows a user with an external federated identity to sign in as an existing user in the user pool, it is critical that it only be used with external IdPs and provider attributes that have been trusted by the application owner.
This action is administrative and requires developer credentials.
" + "documentation":"Links an existing user account in a user pool (DestinationUser) to an identity from an external IdP (SourceUser) based on a specified attribute name and value from the external IdP. This allows you to create a link from the existing user account to an external federated user identity that has not yet been used to sign in. You can then use the federated user identity to sign in as the existing user account.
For example, if there is an existing user with a username and password, this API links that user to a federated user identity. When the user signs in with a federated user identity, they sign in as the existing user account.
The maximum number of federated identities linked to a user is five.
Because this API allows a user with an external federated identity to sign in as an existing user in the user pool, it is critical that it only be used with external IdPs and provider attributes that have been trusted by the application owner.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Lists devices, as an administrator.
Calling this action requires developer credentials.
" + "documentation":"Lists devices, as an administrator.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Lists the groups that the user belongs to.
Calling this action requires developer credentials.
" + "documentation":"Lists the groups that the user belongs to.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
A history of user activity and any risks detected as part of Amazon Cognito advanced security.
" + "documentation":"A history of user activity and any risks detected as part of Amazon Cognito advanced security.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Removes the specified user from the specified group.
Calling this action requires developer credentials.
" + "documentation":"Removes the specified user from the specified group.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Resets the specified user's password in a user pool as an administrator. Works on any user.
When a developer calls this API, the current password is invalidated, so it must be changed. If a user tries to sign in after the API is called, the app will get a PasswordResetRequiredException exception back and should direct the user down the flow to reset the password, which is the same as the forgot password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Calling this action requires developer credentials.
" + "documentation":"Resets the specified user's password in a user pool as an administrator. Works on any user.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Responds to an authentication challenge, as an administrator.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Calling this action requires developer credentials.
" + "documentation":"Responds to an authentication challenge, as an administrator.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
The user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in.
" + "documentation":"The user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Sets the specified user's password in a user pool as an administrator. Works on any user.
The password can be temporary or permanent. If it is temporary, the user status enters the FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it expires, the user won't be able to sign in, and an administrator must reset their password.
Once the user has set a new password, or the password is permanent, the user status is set to Confirmed.
Sets the specified user's password in a user pool as an administrator. Works on any user.
The password can be temporary or permanent. If it is temporary, the user status enters the FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it expires, the user won't be able to sign in, and an administrator must reset their password.
Once the user has set a new password, or the password is permanent, the user status is set to Confirmed.
AdminSetUserPassword can set a password for the user profile that Amazon Cognito creates for third-party federated users. When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in this state can sign in as a federated user, and initiate authentication flows in the API like a linked native user. They can also modify their password and attributes in token-authenticated API requests like ChangePassword and UpdateUserAttributes. As a best security practice and to keep users in sync with your external IdP, don't set passwords on federated user profiles. To set up a federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user profile.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use AdminSetUserMFAPreference instead.
" + "documentation":"This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use AdminSetUserMFAPreference instead.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.
" + "documentation":"Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Updates the device status as an administrator.
Calling this action requires developer credentials.
" + "documentation":"Updates the device status as an administrator.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user.
For custom attributes, you must prepend the custom: prefix to the attribute name.
In addition to updating user attributes, this API can also be used to mark phone and email as verified.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Calling this action requires developer credentials.
" + "documentation":"This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user.
For custom attributes, you must prepend the custom: prefix to the attribute name.
In addition to updating user attributes, this API can also be used to mark phone and email as verified.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Signs out a user from all devices. You must sign AdminUserGlobalSignOut requests with Amazon Web Services credentials. It also invalidates all refresh tokens that Amazon Cognito has issued to a user. The user's current access and ID tokens remain valid until they expire. By default, access and ID tokens expire one hour after they're issued. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the cookie validity period of 1 hour.
Calling this action requires developer credentials.
" + "documentation":"Signs out a user from all devices. AdminUserGlobalSignOut invalidates all identity, access and refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.
Your app isn't aware that a user's access token is revoked unless it attempts to authorize a user pools API request with an access token that contains the scope aws.cognito.signin.user.admin. Your app might otherwise accept access tokens until they expire.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.
Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.
Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.
Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.
After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Changes the password for a specified user in a user pool.
", + "documentation":"Changes the password for a specified user in a user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Confirms tracking of the device. This API call is the call that begins device tracking.
" + "documentation":"Confirms tracking of the device. This API call is the call that begins device tracking.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Allows a user to enter a confirmation code to reset a forgotten password.
", + "documentation":"Allows a user to enter a confirmation code to reset a forgotten password.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Confirms registration of a new user.
", + "documentation":"Confirms registration of a new user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Creates a new group in the specified user pool.
Calling this action requires developer credentials.
" + "documentation":"Creates a new group in the specified user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Creates an IdP for a user pool.
" + "documentation":"Creates an IdP for a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Creates a new OAuth2.0 resource server and defines custom scopes within it.
" + "documentation":"Creates a new OAuth2.0 resource server and defines custom scopes within it.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Creates the user import job.
" + "documentation":"Creates a user import job.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Creates a new Amazon Cognito user pool and sets the password policy for the pool.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Creates a new Amazon Cognito user pool and sets the password policy for the pool.
If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Creates the user pool client.
When you create a new user pool client, token revocation is automatically activated. For more information about revoking tokens, see RevokeToken.
" + "documentation":"Creates the user pool client.
When you create a new user pool client, token revocation is automatically activated. For more information about revoking tokens, see RevokeToken.
If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Creates a new domain for a user pool.
" + "documentation":"Creates a new domain for a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Allows a user to delete himself or herself.
", + "documentation":"Allows a user to delete their own user profile.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Deletes the attributes for a user.
", + "documentation":"Deletes the attributes for a user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Returns the configuration information and metadata of the specified user pool.
" + "documentation":"Returns the configuration information and metadata of the specified user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Client method for returning the configuration information and metadata of the specified user pool app client.
" + "documentation":"Client method for returning the configuration information and metadata of the specified user pool app client.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Forgets the specified device.
" + "documentation":"Forgets the specified device.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. If neither a verified phone number nor a verified email exists, an InvalidParameterException is thrown. To use the confirmation code for resetting the password, call ConfirmForgotPassword.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is determined by the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.
If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Gets the device.
" + "documentation":"Gets the device.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Gets the specified IdP.
" }, + "GetLogDeliveryConfiguration":{ + "name":"GetLogDeliveryConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLogDeliveryConfigurationRequest"}, + "output":{"shape":"GetLogDeliveryConfigurationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"Gets the detailed activity logging configuration for a user pool.
" + }, "GetSigningCertificate":{ "name":"GetSigningCertificate", "http":{ @@ -1248,7 +1265,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"Gets the user attributes and metadata for a user.
", + "documentation":"Gets the user attributes and metadata for a user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Signs out users from all devices. It also invalidates all refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.
"documentation":"Signs out a user from all devices. GlobalSignOut invalidates all identity, access, and refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.
Your app isn't aware that a user's access token is revoked unless it attempts to authorize a user pools API request with an access token that contains the scope aws.cognito.signin.user.admin. Your app might otherwise accept access tokens until they expire.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Lists the sign-in devices that Amazon Cognito has registered to the current user.
" + "documentation":"Lists the sign-in devices that Amazon Cognito has registered to the current user.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Lists the groups associated with a user pool.
Calling this action requires developer credentials.
" + "documentation":"Lists the groups associated with a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Lists information about all IdPs for a user pool.
" + "documentation":"Lists information about all IdPs for a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Lists the resource servers for a user pool.
" + "documentation":"Lists the resource servers for a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Lists the user import jobs.
" + "documentation":"Lists user import jobs for a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Lists the clients that have been created for the specified user pool.
" + "documentation":"Lists the clients that have been created for the specified user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Lists the user pools associated with an Amazon Web Services account.
" + "documentation":"Lists the user pools associated with an Amazon Web Services account.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Lists the users in the Amazon Cognito user pool.
" + "documentation":"Lists users and their basic details in a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Lists the users in the specified group.
Calling this action requires developer credentials.
" + "documentation":"Lists the users in the specified group.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Resends the confirmation (for confirmation of registration) to a specific user in the user pool.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Resends the confirmation (for confirmation of registration) to a specific user in the user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Responds to the authentication challenge.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Responds to the authentication challenge.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.
" + "documentation":"Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Sets up or modifies the detailed activity logging configuration of a user pool.
" }, "SetRiskConfiguration":{ "name":"SetRiskConfiguration", @@ -1656,7 +1690,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.
" + "documentation":"Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Sets the user pool multi-factor authentication (MFA) configuration.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Sets the user pool multi-factor authentication (MFA) configuration.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.
", + "documentation":"This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Registers the user in the specified user pool and creates a user name, password, and user attributes.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Registers the user in the specified user pool and creates a user name, password, and user attributes.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.
" + "documentation":"Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Updates the device status.
" + "documentation":"Updates the device status.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Updates the specified group with the specified attributes.
Calling this action requires developer credentials.
" + "documentation":"Updates the specified group with the specified attributes.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Updates IdP information for a user pool.
" + "documentation":"Updates IdP information for a user pool.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Updates the name and scopes of resource server. All other fields are read-only.
If you don't provide a value for an attribute, it is set to the default value.
Updates the name and scopes of resource server. All other fields are read-only.
If you don't provide a value for an attribute, it is set to the default value.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Allows a user to update a specific attribute (one at a time).
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Allows a user to update a specific attribute (one at a time).
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool. If you don't provide a value for an attribute, it will be set to the default value.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.
If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.
Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool.
If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Updates the specified user pool app client with the specified attributes. You can get a list of the current user pool app client settings using DescribeUserPoolClient.
If you don't provide a value for an attribute, it will be set to the default value.
You can also use this operation to enable token revocation for user pool clients. For more information about revoking tokens, see RevokeToken.
" + "documentation":"Updates the specified user pool app client with the specified attributes. You can get a list of the current user pool app client settings using DescribeUserPoolClient.
If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.
You can also use this operation to enable token revocation for user pool clients. For more information about revoking tokens, see RevokeToken.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool.
You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You can't use it to change the domain for a user pool.
A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain.
Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically.
However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito.
When you add your new certificate in ACM, you must choose US East (N. Virginia) as the Amazon Web Services Region.
After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.
For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.
" + "documentation":"Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool.
You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You can't use it to change the domain for a user pool.
A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain.
Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically.
However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito.
When you add your new certificate in ACM, you must choose US East (N. Virginia) as the Amazon Web Services Region.
After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.
For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.
Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.
Learn more
Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.
" + "documentation":"Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
Verifies the specified user attributes in the user pool.
If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.
", + "documentation":"Verifies the specified user attributes in the user pool.
If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.
Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.
A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.
If your user pool configuration includes triggers, the AdminConfirmSignUp API action invokes the Lambda function that is specified for the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. In this payload, the clientMetadata attribute provides the data that you assigned to the ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in Lambda, you can process the ClientMetadata value to enhance your workflow for your specific needs.
For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.
When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:
Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose.
Validate the ClientMetadata value.
Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information.
Represents the request to confirm user registration.
" + "documentation":"Confirm a user's registration as a user pool administrator.
" }, "AdminConfirmSignUpResponse":{ "type":"structure", @@ -2230,7 +2268,7 @@ }, "TemporaryPassword":{ "shape":"PasswordType", - "documentation":"The user's temporary password. This password must conform to the password policy that you specified when you created the user pool.
The temporary password is valid only once. To complete the Admin Create User flow, the user must enter the temporary password in the sign-in page, along with a new password to be used in all future sign-ins.
This parameter isn't required. If you don't specify a value, Amazon Cognito generates one for you.
The temporary password can only be used until the user account expiration limit that you specified when you created the user pool. To reset the account after that time limit, you must call AdminCreateUser again, specifying \"RESEND\" for the MessageAction parameter.
The user's temporary password. This password must conform to the password policy that you specified when you created the user pool.
The temporary password is valid only once. To complete the Admin Create User flow, the user must enter the temporary password in the sign-in page, along with a new password to be used in all future sign-ins.
This parameter isn't required. If you don't specify a value, Amazon Cognito generates one for you.
The temporary password can only be used until the user account expiration limit that you set for your user pool. To reset the account after that time limit, you must call AdminCreateUser again and specify RESEND for the MessageAction parameter.
The user name of the user about whom you're receiving information.
" + "documentation":"The username of the user that you requested.
" }, "UserAttributes":{ "shape":"AttributeListType", @@ -2476,7 +2514,7 @@ }, "UserLastModifiedDate":{ "shape":"DateType", - "documentation":"The date the user was last modified.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" }, "Enabled":{ "shape":"BooleanType", @@ -2484,7 +2522,7 @@ }, "UserStatus":{ "shape":"UserStatusType", - "documentation":"The user status. Can be one of the following:
UNCONFIRMED - User has been created but not confirmed.
CONFIRMED - User has been confirmed.
ARCHIVED - User is no longer active.
UNKNOWN - User status isn't known.
RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in.
FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else.
The user status. Can be one of the following:
UNCONFIRMED - User has been created but not confirmed.
CONFIRMED - User has been confirmed.
UNKNOWN - User status isn't known.
RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in.
FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else.
The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow:
For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For ADMIN_NO_SRP_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), PASSWORD (required), DEVICE_KEY.
For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value).
The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow:
For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For ADMIN_USER_PASSWORD_AUTH: USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value).
For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool.
The existing user in the user pool that you want to assign to the external IdP user account. This user can be a native (Username + Password) Amazon Cognito user pools user or a federated user (for example, a SAML or Facebook user). If the user doesn't exist, Amazon Cognito generates an exception. Amazon Cognito returns this user when the new user (with the linked IdP attribute) signs in.
For a native username + password user, the ProviderAttributeValue for the DestinationUser should be the username in the user pool. For a federated user, it should be the provider-specific user_id.
The ProviderAttributeName of the DestinationUser is ignored.
The ProviderName should be set to Cognito for users in Cognito user pools.
All attributes in the DestinationUser profile must be mutable. If you have assigned the user any immutable custom attributes, the operation won't succeed.
The existing user in the user pool that you want to assign to the external IdP user account. This user can be a local (Username + Password) Amazon Cognito user pools user or a federated user (for example, a SAML or Facebook user). If the user doesn't exist, Amazon Cognito generates an exception. Amazon Cognito returns this user when the new user (with the linked IdP attribute) signs in.
For a native username + password user, the ProviderAttributeValue for the DestinationUser should be the username in the user pool. For a federated user, it should be the provider-specific user_id.
The ProviderAttributeName of the DestinationUser is ignored.
The ProviderName should be set to Cognito for users in Cognito user pools.
All attributes in the DestinationUser profile must be mutable. If you have assigned the user any immutable custom attributes, the operation won't succeed.
An external IdP account for a user who doesn't exist yet in the user pool. This user must be a federated user (for example, a SAML or Facebook user), not another native user.
If the SourceUser is using a federated social IdP, such as Facebook, Google, or Login with Amazon, you must set the ProviderAttributeName to Cognito_Subject. For social IdPs, the ProviderName will be Facebook, Google, or LoginWithAmazon, and Amazon Cognito will automatically parse the Facebook, Google, and Login with Amazon tokens for id, sub, and user_id, respectively. The ProviderAttributeValue for the user must be the same value as the id, sub, or user_id value found in the social IdP token.
For SAML, the ProviderAttributeName can be any value that matches a claim in the SAML assertion. If you want to link SAML users based on the subject of the SAML assertion, you should map the subject to a claim through the SAML IdP and submit that claim name as the ProviderAttributeName. If you set ProviderAttributeName to Cognito_Subject, Amazon Cognito will automatically parse the default unique identifier found in the subject from the SAML token.
An external IdP account for a user who doesn't exist yet in the user pool. This user must be a federated user (for example, a SAML or Facebook user), not another native user.
If the SourceUser is using a federated social IdP, such as Facebook, Google, or Login with Amazon, you must set the ProviderAttributeName to Cognito_Subject. For social IdPs, the ProviderName will be Facebook, Google, or LoginWithAmazon, and Amazon Cognito will automatically parse the Facebook, Google, and Login with Amazon tokens for id, sub, and user_id, respectively. The ProviderAttributeValue for the user must be the same value as the id, sub, or user_id value found in the social IdP token.
For OIDC, the ProviderAttributeName can be any value that matches a claim in the ID token, or that your app retrieves from the userInfo endpoint. You must map the claim to a user pool attribute in your IdP configuration, and set the user pool attribute name as the value of ProviderAttributeName in your AdminLinkProviderForUser request.
For SAML, the ProviderAttributeName can be any value that matches a claim in the SAML assertion. To link SAML users based on the subject of the SAML assertion, map the subject to a claim through the SAML IdP and set that claim name as the value of ProviderAttributeName in your AdminLinkProviderForUser request.
For both OIDC and SAML users, when you set ProviderAttributeName to Cognito_Subject, Amazon Cognito will automatically parse the default unique identifier found in the subject from the IdP token.
The challenge responses. These are inputs corresponding to the value of ChallengeName, for example:
SMS_MFA: SMS_MFA_CODE, USERNAME, SECRET_HASH (if app client is configured with client secret).
PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME, SECRET_HASH (if app client is configured with client secret).
PASSWORD_VERIFIER requires DEVICE_KEY when signing in with a remembered device.
ADMIN_NO_SRP_AUTH: PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret).
NEW_PASSWORD_REQUIRED: NEW_PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required attributes that Amazon Cognito returned as requiredAttributes in the AdminInitiateAuth response, add a userAttributes.attributename parameter. This parameter can also set values for writable attributes that aren't required by your user pool.
In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. In AdminRespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the AdminUpdateUserAttributes API operation to modify the value of any additional attributes.
MFA_SETUP requires USERNAME, plus you must use the session value returned by VerifySoftwareToken in the Session parameter.
The value of the USERNAME attribute must be the user's actual username, not an alias (such as an email address or phone number). To make this simpler, the AdminInitiateAuth response includes the actual username value in the USERNAMEUSER_ID_FOR_SRP attribute. This happens even if you specified an alias in your call to AdminInitiateAuth.
The challenge responses. These are inputs corresponding to the value of ChallengeName, for example:
SMS_MFA: SMS_MFA_CODE, USERNAME, SECRET_HASH (if app client is configured with client secret).
PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME, SECRET_HASH (if app client is configured with client secret).
PASSWORD_VERIFIER requires DEVICE_KEY when signing in with a remembered device.
ADMIN_NO_SRP_AUTH: PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret).
NEW_PASSWORD_REQUIRED: NEW_PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required attributes that Amazon Cognito returned as requiredAttributes in the AdminInitiateAuth response, add a userAttributes.attributename parameter. This parameter can also set values for writable attributes that aren't required by your user pool.
In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. In AdminRespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the AdminUpdateUserAttributes API operation to modify the value of any additional attributes.
MFA_SETUP requires USERNAME, plus you must use the session value returned by VerifySoftwareToken in the Session parameter.
The value of the USERNAME attribute must be the user's actual username, not an alias (such as an email address or phone number). To make this simpler, the AdminInitiateAuth response includes the actual username value in the USERNAMEUSER_ID_FOR_SRP attribute. This happens even if you specified an alias in your call to AdminInitiateAuth.
For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool.
The authentication event feedback value.
" + "documentation":"The authentication event feedback value. When you provide a FeedbackValue value of valid, you tell Amazon Cognito that you trust a user session where Amazon Cognito has evaluated some level of risk. When you provide a FeedbackValue value of invalid, you tell Amazon Cognito that you don't trust a user session, or you don't believe that Amazon Cognito evaluated a high-enough risk level.
If UserDataShared is true, Amazon Cognito includes user data in the events that it publishes to Amazon Pinpoint analytics.
The Amazon Pinpoint analytics configuration necessary to collect metrics for a user pool.
In Regions where Amazon Pinpointisn't available, user pools only support sending events to Amazon Pinpoint projects in us-east-1. In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region.
The Amazon Pinpoint analytics configuration necessary to collect metrics for a user pool.
In Regions where Amazon Pinpoint isn't available, user pools only support sending events to Amazon Pinpoint projects in us-east-1. In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region.
The creation date
" + "documentation":"The date and time, in ISO 8601 format, when the item was created.
" }, "EventResponse":{ "shape":"EventResponseType", @@ -3289,7 +3327,11 @@ "max":200 }, "BooleanType":{"type":"boolean"}, - "CSSType":{"type":"string"}, + "CSSType":{ + "type":"string", + "max":131072, + "min":0 + }, "CSSVersionType":{"type":"string"}, "CallbackURLsListType":{ "type":"list", @@ -3352,8 +3394,7 @@ "ChallengeResponsesType":{ "type":"map", "key":{"shape":"StringType"}, - "value":{"shape":"StringType"}, - "sensitive":true + "value":{"shape":"StringType"} }, "ChangePasswordRequest":{ "type":"structure", @@ -3418,6 +3459,16 @@ "pattern":"[\\w+]+", "sensitive":true }, + "CloudWatchLogsConfigurationType":{ + "type":"structure", + "members":{ + "LogGroupArn":{ + "shape":"ArnType", + "documentation":"The Amazon Resource Name (arn) of a CloudWatch Logs log group where your user pool sends logs. The log group must not be encrypted with Key Management Service and must be in the same Amazon Web Services account as your user pool.
" + } + }, + "documentation":"The CloudWatch logging destination of a user pool detailed activity logging configuration.
" + }, "CodeDeliveryDetailsListType":{ "type":"list", "member":{"shape":"CodeDeliveryDetailsType"} @@ -3563,7 +3614,7 @@ }, "SecretHash":{ "shape":"SecretHashType", - "documentation":"A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.
" + "documentation":"A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values.
The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.
For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.
The default time unit for AccessTokenValidity in an API request is hours. Valid range is displayed below in seconds.
If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.
" + "documentation":"The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.
For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.
The default time unit for IdTokenValidity in an API request is hours. Valid range is displayed below in seconds.
If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.
" }, "TokenValidityUnits":{ "shape":"TokenValidityUnitsType", @@ -3910,7 +3961,7 @@ }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", - "documentation":"Set to true if the client is allowed to follow the OAuth protocol when interacting with Amazon Cognito user pools.
" + "documentation":"Set to true to use OAuth 2.0 features in your user pool app client.
AllowedOAuthFlowsUserPoolClient must be true before you can configure the following features in your app client.
CallBackURLs: Callback URLs.
LogoutURLs: Sign-out redirect URLs.
AllowedOAuthScopes: OAuth 2.0 scopes.
AllowedOAuthFlows: Support for authorization code, implicit, and client credentials OAuth 2.0 grants.
To use OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set AllowedOAuthFlowsUserPoolClient to true in a CreateUserPoolClient or UpdateUserPoolClient API request. If you don't set a value for AllowedOAuthFlowsUserPoolClient in a request with the CLI or SDKs, it defaults to false.
Enables advanced security risk detection. Set the key AdvancedSecurityMode to the value \"AUDIT\".
User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED.
For more information, see Adding advanced security to a user pool.
" }, "UsernameConfiguration":{ "shape":"UsernameConfigurationType", - "documentation":"Case sensitivity on the username input for the selected sign-in option. For example, when case sensitivity is set to False, users can sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set. For more information, see UsernameConfigurationType.
Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to False (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, username, USERNAME, or UserName, or for email, email@example.com or EMaiL@eXamplE.Com. For most use cases, set case sensitivity to False (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.
This configuration is immutable after you set it. For more information, see UsernameConfigurationType.
" }, "AccountRecoverySetting":{ "shape":"AccountRecoverySettingType", @@ -4559,7 +4610,7 @@ }, "DeviceLastModifiedDate":{ "shape":"DateType", - "documentation":"The last modified date of the device.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" }, "DeviceLastAuthenticatedDate":{ "shape":"DateType", @@ -4589,7 +4640,7 @@ }, "CloudFrontDistribution":{ "shape":"StringType", - "documentation":"The Amazon Resource Name (ARN) of the Amazon CloudFront distribution.
" + "documentation":"The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider.
" }, "Version":{ "shape":"DomainVersionType", @@ -4644,7 +4695,7 @@ "members":{ "SourceArn":{ "shape":"ArnType", - "documentation":"The ARN of a verified email address in Amazon SES. Amazon Cognito uses this email address in one of the following ways, depending on the value that you specify for the EmailSendingAccount parameter:
If you specify COGNITO_DEFAULT, Amazon Cognito uses this address as the custom FROM address when it emails your users using its built-in email account.
If you specify DEVELOPER, Amazon Cognito emails your users with this address by calling Amazon SES on your behalf.
The Region value of the SourceArn parameter must indicate a supported Amazon Web Services Region of your user pool. Typically, the Region in the SourceArn and the user pool Region are the same. For more information, see Amazon SES email configuration regions in the Amazon Cognito Developer Guide.
The ARN of a verified email address or an address from a verified domain in Amazon SES. You can set a SourceArn email from a verified domain only with an API request. You can set a verified email address, but not an address in a verified domain, in the Amazon Cognito console. Amazon Cognito uses the email address that you provide in one of the following ways, depending on the value that you specify for the EmailSendingAccount parameter:
If you specify COGNITO_DEFAULT, Amazon Cognito uses this address as the custom FROM address when it emails your users using its built-in email account.
If you specify DEVELOPER, Amazon Cognito emails your users with this address by calling Amazon SES on your behalf.
The Region value of the SourceArn parameter must indicate a supported Amazon Web Services Region of your user pool. Typically, the Region in the SourceArn and the user pool Region are the same. For more information, see Amazon SES email configuration regions in the Amazon Cognito Developer Guide.
The event feedback value.
" + "documentation":"The authentication event feedback value. When you provide a FeedbackValue value of valid, you tell Amazon Cognito that you trust a user session where Amazon Cognito has evaluated some level of risk. When you provide a FeedbackValue value of invalid, you tell Amazon Cognito that you don't trust a user session, or you don't believe that Amazon Cognito evaluated a high-enough risk level.
The event risk type.
" }, + "EventSourceName":{ + "type":"string", + "enum":["userNotification"] + }, "EventType":{ "type":"string", "enum":[ @@ -5029,6 +5084,25 @@ } } }, + "GetLogDeliveryConfigurationRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"The ID of the user pool where you want to view detailed activity logging configuration.
" + } + } + }, + "GetLogDeliveryConfigurationResponse":{ + "type":"structure", + "members":{ + "LogDeliveryConfiguration":{ + "shape":"LogDeliveryConfigurationType", + "documentation":"The detailed activity logging configuration of the requested user pool.
" + } + } + }, "GetSigningCertificateRequest":{ "type":"structure", "required":["UserPoolId"], @@ -5153,7 +5227,7 @@ "members":{ "Username":{ "shape":"UsernameType", - "documentation":"The user name of the user you want to retrieve from the get user request.
" + "documentation":"The username of the user that you requested.
" }, "UserAttributes":{ "shape":"AttributeListType", @@ -5234,11 +5308,11 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"The date the group was last modified.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" }, "CreationDate":{ "shape":"DateType", - "documentation":"The date the group was created.
" + "documentation":"The date and time, in ISO 8601 format, when the item was created.
" } }, "documentation":"The group type.
" @@ -5299,11 +5373,11 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"The date the IdP was last modified.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" }, "CreationDate":{ "shape":"DateType", - "documentation":"The date the IdP was created.
" + "documentation":"The date and time, in ISO 8601 format, when the item was created.
" } }, "documentation":"A container for information about an IdP.
" @@ -5331,7 +5405,11 @@ "max":50, "min":0 }, - "ImageFileType":{"type":"blob"}, + "ImageFileType":{ + "type":"blob", + "max":131072, + "min":0 + }, "ImageUrlType":{"type":"string"}, "InitiateAuthRequest":{ "type":"structure", @@ -5346,7 +5424,7 @@ }, "AuthParameters":{ "shape":"AuthParametersType", - "documentation":"The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow:
For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value).
The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow:
For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For USER_PASSWORD_AUTH: USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.
For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value).
For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool.
An array of strings, where each string is the name of a user attribute to be returned for each user in the search results. If the array is null, all attributes are returned.
" + "documentation":"A JSON array of user attribute names, for example given_name, that you want Amazon Cognito to include in the response for each user. When you don't provide an AttributesToGet parameter, Amazon Cognito returns all attributes for each user.
The users returned in the request to list users.
" + "documentation":"A list of the user pool users, and their attributes, that match your query.
Amazon Cognito creates a profile in your user pool for each native user in your user pool, and each unique user ID from your third-party identity providers (IdPs). When you link users with the AdminLinkProviderForUser API operation, the output of ListUsers displays both the IdP user and the native user that you linked. You can identify IdP users in the Users object of this API response by the IdP prefix that Amazon Cognito appends to Username.
The response from the request to list users.
" }, + "LogConfigurationListType":{ + "type":"list", + "member":{"shape":"LogConfigurationType"}, + "max":1, + "min":0 + }, + "LogConfigurationType":{ + "type":"structure", + "required":[ + "LogLevel", + "EventSource" + ], + "members":{ + "LogLevel":{ + "shape":"LogLevel", + "documentation":"The errorlevel selection of logs that a user pool sends for detailed activity logging.
The source of events that your user pool sends for detailed activity logging.
" + }, + "CloudWatchLogsConfiguration":{ + "shape":"CloudWatchLogsConfigurationType", + "documentation":"The CloudWatch logging destination of a user pool.
" + } + }, + "documentation":"The logging parameters of a user pool.
" + }, + "LogDeliveryConfigurationType":{ + "type":"structure", + "required":[ + "UserPoolId", + "LogConfigurations" + ], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"The ID of the user pool where you configured detailed activity logging.
" + }, + "LogConfigurations":{ + "shape":"LogConfigurationListType", + "documentation":"The detailed activity logging destination of a user pool.
" + } + }, + "documentation":"The logging parameters of a user pool.
" + }, + "LogLevel":{ + "type":"string", + "enum":["ERROR"] + }, "LogoutURLsListType":{ "type":"list", "member":{"shape":"RedirectUrlType"}, @@ -6175,7 +6303,7 @@ }, "CreationDate":{ "shape":"DateType", - "documentation":"The date the provider was added to the user pool.
" + "documentation":"The date and time, in ISO 8601 format, when the item was created.
" } }, "documentation":"A container for IdP details.
" @@ -6427,7 +6555,7 @@ }, "ChallengeResponses":{ "shape":"ChallengeResponsesType", - "documentation":"The challenge responses. These are inputs corresponding to the value of ChallengeName, for example:
SECRET_HASH (if app client is configured with client secret) applies to all of the inputs that follow (including SOFTWARE_TOKEN_MFA).
SMS_MFA: SMS_MFA_CODE, USERNAME.
PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME.
PASSWORD_VERIFIER requires DEVICE_KEY when you sign in with a remembered device.
NEW_PASSWORD_REQUIRED: NEW_PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required attributes that Amazon Cognito returned as requiredAttributes in the InitiateAuth response, add a userAttributes.attributename parameter. This parameter can also set values for writable attributes that aren't required by your user pool.
In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. In RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the UpdateUserAttributes API operation to modify the value of any additional attributes.
SOFTWARE_TOKEN_MFA: USERNAME and SOFTWARE_TOKEN_MFA_CODE are required attributes.
DEVICE_SRP_AUTH requires USERNAME, DEVICE_KEY, SRP_A (and SECRET_HASH).
DEVICE_PASSWORD_VERIFIER requires everything that PASSWORD_VERIFIER requires, plus DEVICE_KEY.
MFA_SETUP requires USERNAME, plus you must use the session value returned by VerifySoftwareToken in the Session parameter.
The challenge responses. These are inputs corresponding to the value of ChallengeName, for example:
SECRET_HASH (if app client is configured with client secret) applies to all of the inputs that follow (including SOFTWARE_TOKEN_MFA).
SMS_MFA: SMS_MFA_CODE, USERNAME.
PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME.
PASSWORD_VERIFIER requires DEVICE_KEY when you sign in with a remembered device.
NEW_PASSWORD_REQUIRED: NEW_PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required attributes that Amazon Cognito returned as requiredAttributes in the InitiateAuth response, add a userAttributes.attributename parameter. This parameter can also set values for writable attributes that aren't required by your user pool.
In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. In RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the UpdateUserAttributes API operation to modify the value of any additional attributes.
SOFTWARE_TOKEN_MFA: USERNAME and SOFTWARE_TOKEN_MFA_CODE are required attributes.
DEVICE_SRP_AUTH requires USERNAME, DEVICE_KEY, SRP_A (and SECRET_HASH).
DEVICE_PASSWORD_VERIFIER requires everything that PASSWORD_VERIFIER requires, plus DEVICE_KEY.
MFA_SETUP requires USERNAME, plus you must use the session value returned by VerifySoftwareToken in the Session parameter.
For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool.
The last modified date.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" } }, "documentation":"The risk configuration type.
" @@ -6583,11 +6711,11 @@ "members":{ "Name":{ "shape":"CustomAttributeNameType", - "documentation":"A schema attribute of the name type.
" + "documentation":"The name of your user pool attribute, for example username or custom:costcenter.
The attribute data type.
" + "documentation":"The data format of the values for your attribute.
" }, "DeveloperOnlyAttribute":{ "shape":"BooleanType", @@ -6596,7 +6724,7 @@ }, "Mutable":{ "shape":"BooleanType", - "documentation":"Specifies whether the value of the attribute can be changed.
For any user pool attribute that is mapped to an IdP attribute, you must set this parameter to true. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If an attribute is immutable, Amazon Cognito throws an error when it attempts to update the attribute. For more information, see Specifying Identity Provider Attribute Mappings for Your User Pool.
Specifies whether the value of the attribute can be changed.
Any user pool attribute whose value you map from an IdP attribute must be mutable, with a parameter value of true. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If an attribute is immutable, Amazon Cognito throws an error when it attempts to update the attribute. For more information, see Specifying Identity Provider Attribute Mappings for Your User Pool.
Specifies the constraints for an attribute of the string type.
" } }, - "documentation":"Contains information about the schema attribute.
" + "documentation":"A list of the user attributes and their properties in your user pool. The attribute schema contains standard attributes, custom attributes with a custom: prefix, and developer attributes with a dev: prefix. For more information, see User pool attributes.
Developer-only attributes are a legacy feature of user pools and are read-only to all app clients. You can create and update developer-only attributes only with IAM-authenticated API operations. Use app client read/write permissions instead.
" }, "SchemaAttributesListType":{ "type":"list", @@ -6665,8 +6793,33 @@ "SessionType":{ "type":"string", "max":2048, - "min":20, - "sensitive":true + "min":20 + }, + "SetLogDeliveryConfigurationRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "LogConfigurations" + ], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"The ID of the user pool where you want to configure detailed activity logging .
" + }, + "LogConfigurations":{ + "shape":"LogConfigurationListType", + "documentation":"A collection of all of the detailed activity logging configurations for a user pool.
" + } + } + }, + "SetLogDeliveryConfigurationResponse":{ + "type":"structure", + "members":{ + "LogDeliveryConfiguration":{ + "shape":"LogDeliveryConfigurationType", + "documentation":"The detailed activity logging configuration that you applied to the requested user pool.
" + } + } }, "SetRiskConfigurationRequest":{ "type":"structure", @@ -6947,8 +7100,7 @@ "type":"string", "max":6, "min":6, - "pattern":"[0-9]+", - "sensitive":true + "pattern":"[0-9]+" }, "SoftwareTokenMfaConfigType":{ "type":"structure", @@ -7051,7 +7203,11 @@ }, "documentation":"The constraints associated with a string attribute.
" }, - "StringType":{"type":"string"}, + "StringType":{ + "type":"string", + "max":131072, + "min":0 + }, "SupportedIdentityProvidersListType":{ "type":"list", "member":{"shape":"ProviderNameType"} @@ -7112,15 +7268,15 @@ "members":{ "AccessToken":{ "shape":"TimeUnitsType", - "documentation":" A time unit of seconds, minutes, hours, or days for the value that you set in the AccessTokenValidity parameter. The default AccessTokenValidity time unit is hours.
A time unit of seconds, minutes, hours, or days for the value that you set in the AccessTokenValidity parameter. The default AccessTokenValidity time unit is hours. AccessTokenValidity duration can range from five minutes to one day.
A time unit of seconds, minutes, hours, or days for the value that you set in the IdTokenValidity parameter. The default IdTokenValidity time unit is hours.
A time unit of seconds, minutes, hours, or days for the value that you set in the IdTokenValidity parameter. The default IdTokenValidity time unit is hours. IdTokenValidity duration can range from five minutes to one day.
A time unit of seconds, minutes, hours, or days for the value that you set in the RefreshTokenValidity parameter. The default RefreshTokenValidity time unit is days.
A time unit of seconds, minutes, hours, or days for the value that you set in the RefreshTokenValidity parameter. The default RefreshTokenValidity time unit is days. RefreshTokenValidity duration can range from 60 minutes to 10 years.
The data type TokenValidityUnits specifies the time units you use when you set the duration of ID, access, and refresh tokens.
" @@ -7172,11 +7328,11 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"The last-modified date for the UI customization.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" }, "CreationDate":{ "shape":"DateType", - "documentation":"The creation date for the UI customization.
" + "documentation":"The date and time, in ISO 8601 format, when the item was created.
" } }, "documentation":"A container for the UI customization information for a user pool's built-in app UI.
" @@ -7285,7 +7441,7 @@ }, "FeedbackValue":{ "shape":"FeedbackValueType", - "documentation":"The authentication event feedback value.
" + "documentation":"The authentication event feedback value. When you provide a FeedbackValue value of valid, you tell Amazon Cognito that you trust a user session where Amazon Cognito has evaluated some level of risk. When you provide a FeedbackValue value of invalid, you tell Amazon Cognito that you don't trust a user session, or you don't believe that Amazon Cognito evaluated a high-enough risk level.
The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.
For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.
The default time unit for AccessTokenValidity in an API request is hours. Valid range is displayed below in seconds.
If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.
" + "documentation":"The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.
For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.
The default time unit for IdTokenValidity in an API request is hours. Valid range is displayed below in seconds.
If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.
" }, "TokenValidityUnits":{ "shape":"TokenValidityUnitsType", - "documentation":"The units in which the validity times are represented. The default unit for RefreshToken is days, and the default for ID and access tokens is hours.
" + "documentation":"The time units you use when you set the duration of ID, access, and refresh tokens. The default unit for RefreshToken is days, and the default for ID and access tokens is hours.
" }, "ReadAttributes":{ "shape":"ClientPermissionListType", @@ -7540,7 +7696,7 @@ }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", - "documentation":"Set to true if the client is allowed to follow the OAuth protocol when interacting with Amazon Cognito user pools.
" + "documentation":"Set to true to use OAuth 2.0 features in your user pool app client.
AllowedOAuthFlowsUserPoolClient must be true before you can configure the following features in your app client.
CallBackURLs: Callback URLs.
LogoutURLs: Sign-out redirect URLs.
AllowedOAuthScopes: OAuth 2.0 scopes.
AllowedOAuthFlows: Support for authorization code, implicit, and client credentials OAuth 2.0 grants.
To use OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set AllowedOAuthFlowsUserPoolClient to true in a CreateUserPoolClient or UpdateUserPoolClient API request. If you don't set a value for AllowedOAuthFlowsUserPoolClient in a request with the CLI or SDKs, it defaults to false.
Enables advanced security risk detection. Set the key AdvancedSecurityMode to the value \"AUDIT\".
User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED.
For more information, see Adding advanced security to a user pool.
" }, "AccountRecoverySetting":{ "shape":"AccountRecoverySettingType", @@ -7702,7 +7858,7 @@ "members":{ "AttributesRequireVerificationBeforeUpdate":{ "shape":"AttributesRequireVerificationBeforeUpdateType", - "documentation":"Requires that your user verifies their email address, phone number, or both before Amazon Cognito updates the value of that attribute. When you update a user attribute that has this option activated, Amazon Cognito sends a verification message to the new phone number or email address. Amazon Cognito doesn’t change the value of the attribute until your user responds to the verification message and confirms the new value.
You can verify an updated email address or phone number with a VerifyUserAttribute API request. You can also call the UpdateUserAttributes or AdminUpdateUserAttributes API and set email_verified or phone_number_verified to true.
When AttributesRequireVerificationBeforeUpdate is false, your user pool doesn't require that your users verify attribute changes before Amazon Cognito updates them. In a user pool where AttributesRequireVerificationBeforeUpdate is false, API operations that change attribute values can immediately update a user’s email or phone_number attribute.
Requires that your user verifies their email address, phone number, or both before Amazon Cognito updates the value of that attribute. When you update a user attribute that has this option activated, Amazon Cognito sends a verification message to the new phone number or email address. Amazon Cognito doesn’t change the value of the attribute until your user responds to the verification message and confirms the new value.
You can verify an updated email address or phone number with a VerifyUserAttribute API request. You can also call the AdminUpdateUserAttributes API and set email_verified or phone_number_verified to true.
When AttributesRequireVerificationBeforeUpdate is false, your user pool doesn't require that your users verify attribute changes before Amazon Cognito updates them. In a user pool where AttributesRequireVerificationBeforeUpdate is false, API operations that change attribute values can immediately update a user’s email or phone_number attribute.
The settings for updates to user attributes. These settings include the property AttributesRequireVerificationBeforeUpdate, a user-pool setting that tells Amazon Cognito how to handle changes to the value of your users' email address and phone number attributes. For more information, see Verifying updates to email addresses and phone numbers.
Encoded device-fingerprint details that your app collected with the Amazon Cognito context data collection library. For more information, see Adding user device and session data to API requests.
" } }, - "documentation":"Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.
", - "sensitive":true + "documentation":"Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.
" }, "UserFilterType":{ "type":"string", @@ -7783,7 +7938,7 @@ }, "CreationDate":{ "shape":"DateType", - "documentation":"The date the user import job was created.
" + "documentation":"The date and time, in ISO 8601 format, when the item was created.
" }, "StartDate":{ "shape":"DateType", @@ -7877,10 +8032,10 @@ "members":{ "AdvancedSecurityMode":{ "shape":"AdvancedSecurityModeType", - "documentation":"The advanced security mode.
" + "documentation":"The operating mode of advanced security features in your user pool.
" } }, - "documentation":"The user pool add-ons type.
" + "documentation":"User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED.
For more information, see Adding advanced security to a user pool.
" }, "UserPoolClientDescription":{ "type":"structure", @@ -7925,11 +8080,11 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"The date the user pool client was last modified.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" }, "CreationDate":{ "shape":"DateType", - "documentation":"The date the user pool client was created.
" + "documentation":"The date and time, in ISO 8601 format, when the item was created.
" }, "RefreshTokenValidity":{ "shape":"RefreshTokenValidityType", @@ -7941,7 +8096,7 @@ }, "IdTokenValidity":{ "shape":"IdTokenValidityType", - "documentation":"The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.
For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.
The default time unit for AccessTokenValidity in an API request is hours. Valid range is displayed below in seconds.
If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.
" + "documentation":"The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.
For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.
The default time unit for IdTokenValidity in an API request is hours. Valid range is displayed below in seconds.
If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.
" }, "TokenValidityUnits":{ "shape":"TokenValidityUnitsType", @@ -7985,7 +8140,7 @@ }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", - "documentation":"Set to true if the client is allowed to follow the OAuth protocol when interacting with Amazon Cognito user pools.
", + "documentation":"Set to true to use OAuth 2.0 features in your user pool app client.
AllowedOAuthFlowsUserPoolClient must be true before you can configure the following features in your app client.
CallBackURLs: Callback URLs.
LogoutURLs: Sign-out redirect URLs.
AllowedOAuthScopes: OAuth 2.0 scopes.
AllowedOAuthFlows: Support for authorization code, implicit, and client credentials OAuth 2.0 grants.
To use OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set AllowedOAuthFlowsUserPoolClient to true in a CreateUserPoolClient or UpdateUserPoolClient API request. If you don't set a value for AllowedOAuthFlowsUserPoolClient in a request with the CLI or SDKs, it defaults to false.
The date the user pool description was last modified.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" }, "CreationDate":{ "shape":"DateType", - "documentation":"The date the user pool description was created.
" + "documentation":"The date and time, in ISO 8601 format, when the item was created.
" } }, "documentation":"A user pool description.
" @@ -8121,15 +8276,15 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"The date the user pool was last modified.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" }, "CreationDate":{ "shape":"DateType", - "documentation":"The date the user pool was created.
" + "documentation":"The date and time, in ISO 8601 format, when the item was created.
" }, "SchemaAttributes":{ "shape":"SchemaAttributesListType", - "documentation":"A container with the schema attributes of a user pool.
" + "documentation":"A list of the user attributes and their properties in your user pool. The attribute schema contains standard attributes, custom attributes with a custom: prefix, and developer attributes with a dev: prefix. For more information, see User pool attributes.
Developer-only attributes are a legacy feature of user pools, are read-only to all app clients. You can create and update developer-only attributes only with IAM-authenticated API operations. Use app client read/write permissions instead.
" }, "AutoVerifiedAttributes":{ "shape":"VerifiedAttributesListType", @@ -8181,7 +8336,7 @@ }, "EmailConfiguration":{ "shape":"EmailConfigurationType", - "documentation":"The email configuration of your user pool. The email configuration type sets your preferred sending method, Amazon Web Services Region, and sender for messages tfrom your user pool.
" + "documentation":"The email configuration of your user pool. The email configuration type sets your preferred sending method, Amazon Web Services Region, and sender for messages from your user pool.
" }, "SmsConfiguration":{ "shape":"SmsConfigurationType", @@ -8193,7 +8348,7 @@ }, "SmsConfigurationFailure":{ "shape":"StringType", - "documentation":"The reason why the SMS configuration can't send the messages to your users.
This message might include comma-separated values to describe why your SMS configuration can't send messages to user pool end users.
The Identity and Access Management role that Amazon Cognito uses to send SMS messages isn't properly configured. For more information, see SmsConfigurationType.
The Amazon Web Services account is in the SNS SMS Sandbox and messages will only reach verified end users. This parameter won’t get populated with SNSSandbox if the IAM user creating the user pool doesn’t have SNS permissions. To learn how to move your Amazon Web Services account out of the sandbox, see Moving out of the SMS sandbox.
The reason why the SMS configuration can't send the messages to your users.
This message might include comma-separated values to describe why your SMS configuration can't send messages to user pool end users.
The Identity and Access Management role that Amazon Cognito uses to send SMS messages isn't properly configured. For more information, see SmsConfigurationType.
The Amazon Web Services account is in the SNS SMS Sandbox and messages will only reach verified end users. This parameter won’t get populated with SNSSandbox if the user creating the user pool doesn’t have SNS permissions. To learn how to move your Amazon Web Services account out of the sandbox, see Moving out of the SMS sandbox.
The user pool add-ons.
" + "documentation":"User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED.
For more information, see Adding advanced security to a user pool.
" }, "UsernameConfiguration":{ "shape":"UsernameConfigurationType", @@ -8259,7 +8414,7 @@ }, "UserLastModifiedDate":{ "shape":"DateType", - "documentation":"The last modified date of the user.
" + "documentation":"The date and time, in ISO 8601 format, when the item was modified.
" }, "Enabled":{ "shape":"BooleanType", @@ -8267,7 +8422,7 @@ }, "UserStatus":{ "shape":"UserStatusType", - "documentation":"The user status. This can be one of the following:
UNCONFIRMED - User has been created but not confirmed.
CONFIRMED - User has been confirmed.
EXTERNAL_PROVIDER - User signed in with a third-party IdP.
ARCHIVED - User is no longer active.
UNKNOWN - User status isn't known.
RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in.
FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else.
The user status. This can be one of the following:
UNCONFIRMED - User has been created but not confirmed.
CONFIRMED - User has been confirmed.
EXTERNAL_PROVIDER - User signed in with a third-party IdP.
UNKNOWN - User status isn't known.
RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in.
FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else.
Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs.
Valid values include:
Enables case sensitivity for all username input. When this option is set to True, users must sign in using the exact capitalization of their given username, such as “UserName”. This is the default value.
Enables case insensitivity for all username input. For example, when this option is set to False, users can sign in using either \"username\" or \"Username\". This option also enables both preferred_username and email alias to be case insensitive, in addition to the username attribute.
Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs. For most use cases, set case sensitivity to False (case insensitive) as a best practice. When usernames and email addresses are case insensitive, users can sign in as the same user when they enter a different capitalization of their user name.
Valid values include:
Enables case sensitivity for all username input. When this option is set to True, users must sign in using the exact capitalization of their given username, such as “UserName”. This is the default value.
Enables case insensitivity for all username input. For example, when this option is set to False, users can sign in using username, USERNAME, or UserName. This option also enables both preferred_username and email alias to be case insensitive, in addition to the username attribute.
The username configuration type.
" @@ -8434,5 +8589,5 @@ }, "WrappedBooleanType":{"type":"boolean"} }, - "documentation":"Using the Amazon Cognito user pools API, you can create a user pool to manage directories and users. You can authenticate a user to obtain tokens related to user identity and access policies.
This API reference provides information about user pools in Amazon Cognito user pools.
For more information, see the Amazon Cognito Documentation.
" + "documentation":"With the Amazon Cognito user pools API, you can set up user pools and app clients, and authenticate users. To authenticate users from third-party identity providers (IdPs) in this API, you can link IdP users to native user profiles. Learn more about the authentication and authorization of federated users in the Using the Amazon Cognito user pools API and user pool endpoints.
This API reference provides detailed information about API operations and object types in Amazon Cognito. At the bottom of the page for each API operation and object, under See Also, you can learn how to use it in an Amazon Web Services SDK in the language of your choice.
Along with resource management operations, the Amazon Cognito user pools API includes classes of operations and authorization models for client-side and server-side user operations. For more information, see Using the Amazon Cognito native and OIDC APIs in the Amazon Cognito Developer Guide.
You can also start reading about the CognitoIdentityProvider client in the following SDK guides.
To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services SDKs.
" } From 70b97be805f535e38efbda072720fcbc52eca806 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 2 Aug 2023 18:05:13 +0000 Subject: [PATCH 093/270] AWS Glue Update: This release includes additional Glue Streaming KAKFA SASL property types. --- .../next-release/feature-AWSGlue-127b740.json | 6 + .../codegen-resources/endpoint-rule-set.json | 344 ++++++++---------- .../codegen-resources/service-2.json | 13 +- 3 files changed, 171 insertions(+), 192 deletions(-) create mode 100644 .changes/next-release/feature-AWSGlue-127b740.json diff --git a/.changes/next-release/feature-AWSGlue-127b740.json b/.changes/next-release/feature-AWSGlue-127b740.json new file mode 100644 index 000000000000..ffc952dfe818 --- /dev/null +++ b/.changes/next-release/feature-AWSGlue-127b740.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "This release includes additional Glue Streaming KAKFA SASL property types." +} diff --git a/services/glue/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/glue/src/main/resources/codegen-resources/endpoint-rule-set.json index 477f7b05b763..d1bd27f670ab 100644 --- a/services/glue/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/glue/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + 
"conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://glue-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": 
"https://glue-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://glue-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://glue-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - 
"type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://glue.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://glue.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://glue.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://glue.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 29530617b177..5084c09af879 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -5843,7 +5843,7 @@ }, "ConnectionProperties":{ "shape":"ConnectionProperties", - "documentation":"These key-value pairs define parameters for the connection:
HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".
PASSWORD - A password, if one is used, for the user name.
ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
JDBC_ENGINE - The name of the JDBC engine to use.
JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
CONFIG_FILES - (Reserved for future use.)
INSTANCE_ID - The instance ID to use.
JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.
CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.
SECRET_ID - The secret ID used for the secret manager of credentials.
CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.
CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.
CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.
KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.
KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".
KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.
KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".
KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).
KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).
KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).
ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).
ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", or \"AWS_MSK_IAM\". These are the supported SASL Mechanisms.
KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.
KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.
ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.
KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.
KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.
KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos princial used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.
These key-value pairs define parameters for the connection:
HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".
PASSWORD - A password, if one is used, for the user name.
ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.
JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.
JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
JDBC_ENGINE - The name of the JDBC engine to use.
JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
CONFIG_FILES - (Reserved for future use.)
INSTANCE_ID - The instance ID to use.
JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.
JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.
CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.
SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.
CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.
CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.
SECRET_ID - The secret ID used for the secret manager of credentials.
CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.
CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.
CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.
KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.
KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".
KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.
KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".
KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).
KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).
KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).
ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).
ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", or \"AWS_MSK_IAM\". These are the supported SASL Mechanisms.
KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.
KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.
ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).
KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.
KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.
KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.
KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.
KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.
Gets the status of Service Catalog in SageMaker. Service Catalog is used to create SageMaker projects.
" }, + "GetScalingConfigurationRecommendation":{ + "name":"GetScalingConfigurationRecommendation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetScalingConfigurationRecommendationRequest"}, + "output":{"shape":"GetScalingConfigurationRecommendationResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"Starts an Amazon SageMaker Inference Recommender autoscaling recommendation job. Returns recommendations for autoscaling policies that you can apply to your SageMaker endpoint.
" + }, "GetSearchSuggestions":{ "name":"GetSearchSuggestions", "http":{ @@ -9650,6 +9663,24 @@ "min":1, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*)${1,256}" }, + "CustomizedMetricSpecification":{ + "type":"structure", + "members":{ + "MetricName":{ + "shape":"String", + "documentation":"The name of the customized metric.
" + }, + "Namespace":{ + "shape":"String", + "documentation":"The namespace of the customized metric.
" + }, + "Statistic":{ + "shape":"Statistic", + "documentation":"The statistic of the customized metric.
" + } + }, + "documentation":"A customized metric.
" + }, "DataCaptureConfig":{ "type":"structure", "required":[ @@ -15279,6 +15310,7 @@ "Delete_Failed" ] }, + "Double":{"type":"double"}, "DoubleParameterValue":{"type":"double"}, "DriftCheckBaselines":{ "type":"structure", @@ -15362,6 +15394,32 @@ }, "documentation":"Represents the drift check model quality baselines that can be used when the model monitor is set using the model package.
" }, + "DynamicScalingConfiguration":{ + "type":"structure", + "members":{ + "MinCapacity":{ + "shape":"Integer", + "documentation":"The recommended minimum capacity to specify for your autoscaling policy.
" + }, + "MaxCapacity":{ + "shape":"Integer", + "documentation":"The recommended maximum capacity to specify for your autoscaling policy.
" + }, + "ScaleInCooldown":{ + "shape":"Integer", + "documentation":"The recommended scale in cooldown time for your autoscaling policy.
" + }, + "ScaleOutCooldown":{ + "shape":"Integer", + "documentation":"The recommended scale out cooldown time for your autoscaling policy.
" + }, + "ScalingPolicies":{ + "shape":"ScalingPolicies", + "documentation":"An object of the scaling policies for each metric.
" + } + }, + "documentation":"An object with the recommended values for you to specify when creating an autoscaling policy.
" + }, "EMRStepMetadata":{ "type":"structure", "members":{ @@ -17149,6 +17207,65 @@ } } }, + "GetScalingConfigurationRecommendationRequest":{ + "type":"structure", + "required":["InferenceRecommendationsJobName"], + "members":{ + "InferenceRecommendationsJobName":{ + "shape":"RecommendationJobName", + "documentation":"The name of a previously completed Inference Recommender job.
" + }, + "RecommendationId":{ + "shape":"String", + "documentation":"The recommendation ID of a previously completed inference recommendation. This ID should come from one of the recommendations returned by the job specified in the InferenceRecommendationsJobName field.
Specify either this field or the EndpointName field.
The name of an endpoint benchmarked during a previously completed inference recommendation job. This name should come from one of the recommendations returned by the job specified in the InferenceRecommendationsJobName field.
Specify either this field or the RecommendationId field.
The percentage of how much utilization you want an instance to use before autoscaling. The default value is 50%.
" + }, + "ScalingPolicyObjective":{ + "shape":"ScalingPolicyObjective", + "documentation":"An object where you specify the anticipated traffic pattern for an endpoint.
" + } + } + }, + "GetScalingConfigurationRecommendationResponse":{ + "type":"structure", + "members":{ + "InferenceRecommendationsJobName":{ + "shape":"RecommendationJobName", + "documentation":"The name of a previously completed Inference Recommender job.
" + }, + "RecommendationId":{ + "shape":"String", + "documentation":"The recommendation ID of a previously completed inference recommendation.
" + }, + "EndpointName":{ + "shape":"EndpointName", + "documentation":"The name of an endpoint benchmarked during a previously completed Inference Recommender job.
" + }, + "TargetCpuUtilizationPerCore":{ + "shape":"UtilizationPercentagePerCore", + "documentation":"The percentage of how much utilization you want an instance to use before autoscaling, which you specified in the request. The default value is 50%.
" + }, + "ScalingPolicyObjective":{ + "shape":"ScalingPolicyObjective", + "documentation":"An object representing the anticipated traffic pattern for an endpoint that you specified in the request.
" + }, + "Metric":{ + "shape":"ScalingPolicyMetric", + "documentation":"An object with a list of metrics that were benchmarked during the previously completed Inference Recommender job.
" + }, + "DynamicScalingConfiguration":{ + "shape":"DynamicScalingConfiguration", + "documentation":"An object with the recommended values for you to specify when creating an autoscaling policy.
" + } + } + }, "GetSearchSuggestionsRequest":{ "type":"structure", "required":["Resource"], @@ -23804,6 +23921,21 @@ "Test" ] }, + "MetricSpecification":{ + "type":"structure", + "members":{ + "Predefined":{ + "shape":"PredefinedMetricSpecification", + "documentation":"Information about a predefined metric.
" + }, + "Customized":{ + "shape":"CustomizedMetricSpecification", + "documentation":"Information about a customized metric.
" + } + }, + "documentation":"An object containing information about a metric.
", + "union":true + }, "MetricValue":{"type":"float"}, "MetricsSource":{ "type":"structure", @@ -27433,6 +27565,16 @@ "min":1, "pattern":".*" }, + "PredefinedMetricSpecification":{ + "type":"structure", + "members":{ + "PredefinedMetricType":{ + "shape":"String", + "documentation":"The metric type. You can only apply SageMaker metric types to SageMaker endpoints.
" + } + }, + "documentation":"A specification for a predefined metric.
" + }, "PresignedDomainUrl":{"type":"string"}, "ProbabilityThresholdAttribute":{"type":"double"}, "ProblemType":{ @@ -30010,6 +30152,49 @@ "max":100, "min":0 }, + "ScalingPolicies":{ + "type":"list", + "member":{"shape":"ScalingPolicy"} + }, + "ScalingPolicy":{ + "type":"structure", + "members":{ + "TargetTracking":{ + "shape":"TargetTrackingScalingPolicyConfiguration", + "documentation":"A target tracking scaling policy. Includes support for predefined or customized metrics.
" + } + }, + "documentation":"An object containing a recommended scaling policy.
", + "union":true + }, + "ScalingPolicyMetric":{ + "type":"structure", + "members":{ + "InvocationsPerInstance":{ + "shape":"Integer", + "documentation":"The number of invocations sent to a model, normalized by InstanceCount in each ProductionVariant. 1/numberOfInstances is sent as the value on each request, where numberOfInstances is the number of active instances for the ProductionVariant behind the endpoint at the time of the request.
The interval of time taken by a model to respond as viewed from SageMaker. This interval includes the local communication times taken to send the request and to fetch the response from the container of a model and the time taken to complete the inference in the container.
" + } + }, + "documentation":"The metric for a scaling policy.
" + }, + "ScalingPolicyObjective":{ + "type":"structure", + "members":{ + "MinInvocationsPerMinute":{ + "shape":"Integer", + "documentation":"The minimum number of expected requests to your endpoint per minute.
" + }, + "MaxInvocationsPerMinute":{ + "shape":"Integer", + "documentation":"The maximum number of expected requests to your endpoint per minute.
" + } + }, + "documentation":"An object where you specify the anticipated traffic pattern for an endpoint.
" + }, "ScheduleConfig":{ "type":"structure", "required":["ScheduleExpression"], @@ -30853,6 +31038,16 @@ } } }, + "Statistic":{ + "type":"string", + "enum":[ + "Average", + "Minimum", + "Maximum", + "SampleCount", + "Sum" + ] + }, "StatusDetails":{ "type":"string", "max":1024, @@ -31450,6 +31645,20 @@ "LINUX" ] }, + "TargetTrackingScalingPolicyConfiguration":{ + "type":"structure", + "members":{ + "MetricSpecification":{ + "shape":"MetricSpecification", + "documentation":"An object containing information about a metric.
" + }, + "TargetValue":{ + "shape":"Double", + "documentation":"The recommended target value to specify for the metric when creating a scaling policy.
" + } + }, + "documentation":"A target tracking scaling policy. Includes support for predefined or customized metrics.
When using the PutScalingPolicy API, this parameter is required when you are creating a policy with the policy type TargetTrackingScaling.
The name of a budget. The name must be unique within an account. The : and \\ characters aren't allowed in BudgetName.
The name of a budget. The name must be unique within an account. The : and \\ characters, and the \"/action/\" substring, aren't allowed in BudgetName.
A string that represents the budget name. The \":\" and \"\\\" characters aren't allowed.
", + "documentation":"A string that represents the budget name. The \":\" and \"\\\" characters, and the \"/action/\" substring, aren't allowed.
", "max":100, "min":1, - "pattern":"[^:\\\\]+" + "pattern":"^(?![^:\\\\]*/action/)[^:\\\\]+$" }, "BudgetNotificationsForAccount":{ "type":"structure", @@ -1908,7 +1908,7 @@ "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"The number of API requests has exceeded the maximum allowed API request throttling limit for the account.
", + "documentation":"The number of API requests has exceeded the maximum allowed API request throttling limit for the account.
", "exception":true }, "TimePeriod":{ From 97afb7ba23b6f2953f71d5c33b1fdb4ed3aeae1a Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 2 Aug 2023 18:07:12 +0000 Subject: [PATCH 096/270] Updated endpoints.json and partitions.json. --- .changes/next-release/feature-AWSSDKforJavav2-0443982.json | 6 ++++++ .../amazon/awssdk/regions/internal/region/endpoints.json | 7 +++++++ 2 files changed, 13 insertions(+) create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json new file mode 100644 index 000000000000..e5b5ee3ca5e3 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." +} diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 77b5d0162f2d..1e96eeb5812d 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -9547,9 +9547,11 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -9587,6 +9589,7 @@ "deprecated" : true, "hostname" : "lakeformation-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -14486,9 +14489,11 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" 
: { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", @@ -14496,8 +14501,10 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, From fe6891f7e097ddcc205f7a8962569dac267f8424 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Wed, 2 Aug 2023 18:08:22 +0000 Subject: [PATCH 097/270] Release 2.20.118. Updated CHANGELOG.md, README.md and all pom.xml. --- .changes/2.20.118.json | 42 +++++++++++++++++++ .../feature-AWSBudgets-41849be.json | 6 --- .../next-release/feature-AWSGlue-127b740.json | 6 --- .../feature-AWSResilienceHub-801f5b0.json | 6 --- .../feature-AWSSDKforJavav2-0443982.json | 6 --- ...AmazonCognitoIdentityProvider-efdf6de.json | 6 --- ...eature-AmazonSageMakerService-e727348.json | 6 --- CHANGELOG.md | 27 +++++++++++- README.md | 8 ++-- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 2 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- 
core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- 
services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- 
services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 +- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- 
services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- 
services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- 
services/polly/pom.xml | 2 +- services/pom.xml | 2 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- 
services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- 
third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- 425 files changed, 488 insertions(+), 457 deletions(-) create mode 100644 .changes/2.20.118.json delete mode 100644 .changes/next-release/feature-AWSBudgets-41849be.json delete mode 100644 .changes/next-release/feature-AWSGlue-127b740.json delete mode 100644 .changes/next-release/feature-AWSResilienceHub-801f5b0.json delete mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json delete mode 100644 .changes/next-release/feature-AmazonCognitoIdentityProvider-efdf6de.json delete mode 100644 .changes/next-release/feature-AmazonSageMakerService-e727348.json diff --git a/.changes/2.20.118.json b/.changes/2.20.118.json new file mode 100644 index 000000000000..f9c9494a13d2 --- /dev/null +++ b/.changes/2.20.118.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.118", + "date": "2023-08-02", + "entries": [ + { + "type": "feature", + "category": "AWS Budgets", + "contributor": "", + "description": "As part of CAE tagging integration we need to update our budget names regex filter to prevent customers from using \"/action/\" in their budget names." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "This release includes additional Glue Streaming KAKFA SASL property types." + }, + { + "type": "feature", + "category": "AWS Resilience Hub", + "contributor": "", + "description": "Drift Detection capability added when applications policy has moved from a meet to breach state. Customers will be able to exclude operational recommendations and receive credit in their resilience score. Customers can now add ARH permissions to an existing or new role." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "contributor": "", + "description": "New feature that logs Cognito user pool error messages to CloudWatch logs." 
+ }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "SageMaker Inference Recommender introduces a new API GetScalingConfigurationRecommendation to recommend auto scaling policies based on completed Inference Recommender jobs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AWSBudgets-41849be.json b/.changes/next-release/feature-AWSBudgets-41849be.json deleted file mode 100644 index 294683d9777d..000000000000 --- a/.changes/next-release/feature-AWSBudgets-41849be.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Budgets", - "contributor": "", - "description": "As part of CAE tagging integration we need to update our budget names regex filter to prevent customers from using \"/action/\" in their budget names." -} diff --git a/.changes/next-release/feature-AWSGlue-127b740.json b/.changes/next-release/feature-AWSGlue-127b740.json deleted file mode 100644 index ffc952dfe818..000000000000 --- a/.changes/next-release/feature-AWSGlue-127b740.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Glue", - "contributor": "", - "description": "This release includes additional Glue Streaming KAKFA SASL property types." -} diff --git a/.changes/next-release/feature-AWSResilienceHub-801f5b0.json b/.changes/next-release/feature-AWSResilienceHub-801f5b0.json deleted file mode 100644 index b9d29b12921d..000000000000 --- a/.changes/next-release/feature-AWSResilienceHub-801f5b0.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Resilience Hub", - "contributor": "", - "description": "Drift Detection capability added when applications policy has moved from a meet to breach state. 
Customers will be able to exclude operational recommendations and receive credit in their resilience score. Customers can now add ARH permissions to an existing or new role." -} diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json deleted file mode 100644 index e5b5ee3ca5e3..000000000000 --- a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS SDK for Java v2", - "contributor": "", - "description": "Updated endpoint and partition metadata." -} diff --git a/.changes/next-release/feature-AmazonCognitoIdentityProvider-efdf6de.json b/.changes/next-release/feature-AmazonCognitoIdentityProvider-efdf6de.json deleted file mode 100644 index c3ddcfbaf6e0..000000000000 --- a/.changes/next-release/feature-AmazonCognitoIdentityProvider-efdf6de.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Cognito Identity Provider", - "contributor": "", - "description": "New feature that logs Cognito user pool error messages to CloudWatch logs." -} diff --git a/.changes/next-release/feature-AmazonSageMakerService-e727348.json b/.changes/next-release/feature-AmazonSageMakerService-e727348.json deleted file mode 100644 index a24e6afc7f8e..000000000000 --- a/.changes/next-release/feature-AmazonSageMakerService-e727348.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon SageMaker Service", - "contributor": "", - "description": "SageMaker Inference Recommender introduces a new API GetScalingConfigurationRecommendation to recommend auto scaling policies based on completed Inference Recommender jobs." 
-} diff --git a/CHANGELOG.md b/CHANGELOG.md index 140390d38bd7..fa87c0716bae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +# __2.20.118__ __2023-08-02__ +## __AWS Budgets__ + - ### Features + - As part of CAE tagging integration we need to update our budget names regex filter to prevent customers from using "/action/" in their budget names. + +## __AWS Glue__ + - ### Features + - This release includes additional Glue Streaming KAKFA SASL property types. + +## __AWS Resilience Hub__ + - ### Features + - Drift Detection capability added when applications policy has moved from a meet to breach state. Customers will be able to exclude operational recommendations and receive credit in their resilience score. Customers can now add ARH permissions to an existing or new role. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Cognito Identity Provider__ + - ### Features + - New feature that logs Cognito user pool error messages to CloudWatch logs. + +## __Amazon SageMaker Service__ + - ### Features + - SageMaker Inference Recommender introduces a new API GetScalingConfigurationRecommendation to recommend auto scaling policies based on completed Inference Recommender jobs. 
+ # __2.20.117__ __2023-08-01__ ## __AWS Batch__ - ### Features @@ -973,7 +998,7 @@ Special thanks to the following contributors to this release: ## __Contributors__ Special thanks to the following contributors to this release: -[@breader124](https://github.com/breader124), [@bmaizels](https://github.com/bmaizels) +[@bmaizels](https://github.com/bmaizels), [@breader124](https://github.com/breader124) # __2.20.85__ __2023-06-13__ ## __AWS CloudTrail__ - ### Features diff --git a/README.md b/README.md index 3ab1dbc5a303..36782ed23998 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same verUnique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.
", "idempotencyToken":true + }, + "EnablePrimaryIpv6":{ + "shape":"Boolean", + "documentation":"If you’re creating a network interface in a dual-stack or IPv6-only subnet, you have the option to assign a primary IPv6 IP address. A primary IPv6 address is an IPv6 GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use this option if the instance that this ENI will be attached to relies on its IPv6 address not changing. Amazon Web Services will automatically assign an IPv6 address associated with the ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. If you have multiple IPv6 addresses associated with an ENI attached to your instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 address.
" } } }, @@ -25992,7 +25996,7 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"The ARN of the Outpost on which the snapshot is stored.
This parameter is only supported on BlockDeviceMapping objects called by CreateImage.
The ARN of the Outpost on which the snapshot is stored.
This parameter is not supported when using CreateImage.
", "locationName":"outpostArn" }, "Encrypted":{ @@ -33598,6 +33602,11 @@ "shape":"String", "documentation":"The IPv6 address.
", "locationName":"ipv6Address" + }, + "IsPrimaryIpv6":{ + "shape":"Boolean", + "documentation":"Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information, see RunInstances.
", + "locationName":"isPrimaryIpv6" } }, "documentation":"Describes an IPv6 address.
" @@ -34084,6 +34093,10 @@ "Ipv6PrefixCount":{ "shape":"Integer", "documentation":"The number of IPv6 delegated prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv6Prefix option.
The primary IPv6 address of the network interface. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information about primary IPv6 addresses, see RunInstances.
" } }, "documentation":"Describes a network interface.
" @@ -37999,6 +38012,11 @@ "shape":"Integer", "documentation":"The number of IPv6 prefixes that Amazon Web Services automatically assigned to the network interface.
", "locationName":"ipv6PrefixCount" + }, + "PrimaryIpv6":{ + "shape":"Boolean", + "documentation":"The primary IPv6 address of the network interface. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information about primary IPv6 addresses, see RunInstances.
", + "locationName":"primaryIpv6" } }, "documentation":"Describes a network interface.
" @@ -38091,6 +38109,10 @@ "Ipv6PrefixCount":{ "shape":"Integer", "documentation":"The number of IPv6 prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv6Prefix option.
The primary IPv6 address of the network interface. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information about primary IPv6 addresses, see RunInstances.
" } }, "documentation":"The parameters for a network interface.
" @@ -40705,6 +40727,10 @@ "EnaSrdSpecification":{ "shape":"EnaSrdSpecification", "documentation":"Updates the ENA Express configuration for the network interface that’s attached to the instance.
" + }, + "EnablePrimaryIpv6":{ + "shape":"Boolean", + "documentation":"If you’re modifying a network interface in a dual-stack or IPv6-only subnet, you have the option to assign a primary IPv6 IP address. A primary IPv6 address is an IPv6 GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use this option if the instance that this ENI will be attached to relies on its IPv6 address not changing. Amazon Web Services will automatically assign an IPv6 address associated with the ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. If you have multiple IPv6 addresses associated with an ENI attached to your instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 address.
" } }, "documentation":"Contains the parameters for ModifyNetworkInterfaceAttribute.
" @@ -43529,6 +43555,11 @@ "shape":"String", "documentation":"The IPv6 address.
", "locationName":"ipv6Address" + }, + "IsPrimaryIpv6":{ + "shape":"Boolean", + "documentation":"Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information, see ModifyNetworkInterfaceAttribute.
", + "locationName":"isPrimaryIpv6" } }, "documentation":"Describes an IPv6 address associated with a network interface.
" @@ -49253,6 +49284,10 @@ "DisableApiStop":{ "shape":"Boolean", "documentation":"Indicates whether an instance is enabled for stop protection. For more information, see Stop protection.
" + }, + "EnablePrimaryIpv6":{ + "shape":"Boolean", + "documentation":"If you’re launching an instance into a dual-stack or IPv6-only subnet, you can enable assigning a primary IPv6 address. A primary IPv6 address is an IPv6 GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use this option if an instance relies on its IPv6 address not changing. When you launch the instance, Amazon Web Services will automatically assign an IPv6 address associated with the ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. If you have multiple IPv6 addresses associated with an ENI attached to your instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 address.
" } } }, From 1329091e583d4dd727f0cfaa620153cc672a7b91 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Thu, 3 Aug 2023 22:25:44 +0000 Subject: [PATCH 104/270] Auto Scaling Update: Documentation changes related to Amazon EC2 Auto Scaling APIs. --- .changes/next-release/feature-AutoScaling-4b12bd1.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 .changes/next-release/feature-AutoScaling-4b12bd1.json diff --git a/.changes/next-release/feature-AutoScaling-4b12bd1.json b/.changes/next-release/feature-AutoScaling-4b12bd1.json new file mode 100644 index 000000000000..dde4ad66197c --- /dev/null +++ b/.changes/next-release/feature-AutoScaling-4b12bd1.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Documentation changes related to Amazon EC2 Auto Scaling APIs." +} diff --git a/services/autoscaling/src/main/resources/codegen-resources/service-2.json b/services/autoscaling/src/main/resources/codegen-resources/service-2.json index 627dd69f7451..bb8f13080be5 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/autoscaling/src/main/resources/codegen-resources/service-2.json @@ -1237,7 +1237,7 @@ "members":{ "Alarms":{ "shape":"AlarmList", - "documentation":"The names of one or more CloudWatch alarms to monitor for the instance refresh.
" + "documentation":"The names of one or more CloudWatch alarms to monitor for the instance refresh. You can specify up to 10 alarms.
" } }, "documentation":"Specifies the CloudWatch alarm specification to use in an instance refresh.
" From c49b6daf4c313b824ba3a74ebbb284a9f25e68f5 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Thu, 3 Aug 2023 22:26:25 +0000 Subject: [PATCH 105/270] AWS Database Migration Service Update: The release makes public API for DMS Schema Conversion feature. --- ...e-AWSDatabaseMigrationService-a8b29f0.json | 6 + .../codegen-resources/endpoint-rule-set.json | 438 ++-- .../codegen-resources/paginators-1.json | 45 + .../codegen-resources/service-2.json | 1952 ++++++++++++++++- 4 files changed, 2098 insertions(+), 343 deletions(-) create mode 100644 .changes/next-release/feature-AWSDatabaseMigrationService-a8b29f0.json diff --git a/.changes/next-release/feature-AWSDatabaseMigrationService-a8b29f0.json b/.changes/next-release/feature-AWSDatabaseMigrationService-a8b29f0.json new file mode 100644 index 000000000000..781bf04acaea --- /dev/null +++ b/.changes/next-release/feature-AWSDatabaseMigrationService-a8b29f0.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "The release makes public API for DMS Schema Conversion feature." 
+} diff --git a/services/databasemigration/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/databasemigration/src/main/resources/codegen-resources/endpoint-rule-set.json index c1910361de38..0ee72506b094 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/databasemigration/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" 
+ ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://dms-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://dms-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,205 +225,165 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://dms.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + 
"type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://dms.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-iso", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://dms.{Region}.c2s.ic.gov", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + "fn": "stringEquals", + "argv": [ + "aws-iso", { - "conditions": [ + "fn": "getAttr", + "argv": [ { - "fn": "stringEquals", - "argv": [ - "aws-iso-b", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://dms.{Region}.sc2s.sgov.gov", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://dms-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "ref": "PartitionResult" + }, + "name" + ] } ] } - ] + ], + "endpoint": { + "url": "https://dms.{Region}.c2s.ic.gov", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-iso-b", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - 
"rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://dms.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://dms.{Region}.sc2s.sgov.gov", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://dms-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://dms.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://dms.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://dms.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json 
b/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json index 36ac78fa4eba..9a2032752284 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json +++ b/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json @@ -15,6 +15,11 @@ "output_token": "Marker", "limit_key": "MaxRecords" }, + "DescribeDataProviders": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeEndpointSettings": { "input_token": "Marker", "output_token": "Marker", @@ -45,6 +50,11 @@ "output_token": "Marker", "limit_key": "MaxRecords" }, + "DescribeExtensionPackAssociations": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeFleetAdvisorCollectors": { "input_token": "NextToken", "output_token": "NextToken", @@ -70,6 +80,41 @@ "output_token": "NextToken", "limit_key": "MaxRecords" }, + "DescribeInstanceProfiles": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelAssessments": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelConversions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelExportsAsScript": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelExportsToTarget": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelImports": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMigrationProjects": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeOrderableReplicationInstances": { "input_token": "Marker", "output_token": "Marker", diff --git 
a/services/databasemigration/src/main/resources/codegen-resources/service-2.json b/services/databasemigration/src/main/resources/codegen-resources/service-2.json index 87494e589dc0..3250f07993db 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/service-2.json +++ b/services/databasemigration/src/main/resources/codegen-resources/service-2.json @@ -68,6 +68,21 @@ ], "documentation":"Cancels a single premigration assessment run.
This operation prevents any individual assessments from running if they haven't started running. It also attempts to cancel any individual assessments that are currently running.
" }, + "CreateDataProvider":{ + "name":"CreateDataProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataProviderMessage"}, + "output":{"shape":"CreateDataProviderResponse"}, + "errors":[ + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceAlreadyExistsFault"} + ], + "documentation":"Creates a data provider using the provided settings. A data provider stores a data store type and location information about your database.
" + }, "CreateEndpoint":{ "name":"CreateEndpoint", "http":{ @@ -126,6 +141,44 @@ ], "documentation":"Creates a Fleet Advisor collector using the specified parameters.
" }, + "CreateInstanceProfile":{ + "name":"CreateInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceProfileMessage"}, + "output":{"shape":"CreateInstanceProfileResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"Creates the instance profile using the specified parameters.
" + }, + "CreateMigrationProject":{ + "name":"CreateMigrationProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMigrationProjectMessage"}, + "output":{"shape":"CreateMigrationProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"Creates the migration project using the specified parameters.
You can run this action only after you create an instance profile and data providers using CreateInstanceProfile and CreateDataProvider.
" + }, "CreateReplicationConfig":{ "name":"CreateReplicationConfig", "http":{ @@ -233,6 +286,21 @@ ], "documentation":"Deletes the connection between a replication instance and an endpoint.
" }, + "DeleteDataProvider":{ + "name":"DeleteDataProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDataProviderMessage"}, + "output":{"shape":"DeleteDataProviderResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"Deletes the specified data provider.
All migration projects associated with the data provider must be deleted or modified before you can delete the data provider.
Deletes the specified Fleet Advisor collector databases.
" }, + "DeleteInstanceProfile":{ + "name":"DeleteInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceProfileMessage"}, + "output":{"shape":"DeleteInstanceProfileResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"Deletes the specified instance profile.
All migration projects associated with the instance profile must be deleted or modified before you can delete the instance profile.
Deletes the specified migration project.
The migration project must be closed before you can delete it.
Describes the status of the connections that have been made between the replication instance and an endpoint. Connections are created when you test an endpoint.
" }, + "DescribeConversionConfiguration":{ + "name":"DescribeConversionConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConversionConfigurationMessage"}, + "output":{"shape":"DescribeConversionConfigurationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"Returns configuration parameters for a schema conversion project.
" + }, + "DescribeDataProviders":{ + "name":"DescribeDataProviders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataProvidersMessage"}, + "output":{"shape":"DescribeDataProvidersResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"Returns a paginated list of data providers for your account in the current region.
" + }, "DescribeEndpointSettings":{ "name":"DescribeEndpointSettings", "http":{ @@ -487,6 +612,16 @@ "output":{"shape":"DescribeEventsResponse"}, "documentation":"Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on DMS events, see Working with Events and Notifications in the Database Migration Service User Guide.
" }, + "DescribeExtensionPackAssociations":{ + "name":"DescribeExtensionPackAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExtensionPackAssociationsMessage"}, + "output":{"shape":"DescribeExtensionPackAssociationsResponse"}, + "documentation":"Returns a paginated list of extension pack associations for the specified migration project. An extension pack is an add-on module that emulates functions present in a source database that are required when converting objects to the target database.
" + }, "DescribeFleetAdvisorCollectors":{ "name":"DescribeFleetAdvisorCollectors", "http":{ @@ -552,6 +687,99 @@ ], "documentation":"Returns a list of schemas detected by Fleet Advisor Collectors in your account.
" }, + "DescribeInstanceProfiles":{ + "name":"DescribeInstanceProfiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceProfilesMessage"}, + "output":{"shape":"DescribeInstanceProfilesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"Returns a paginated list of instance profiles for your account in the current region.
" + }, + "DescribeMetadataModelAssessments":{ + "name":"DescribeMetadataModelAssessments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelAssessmentsMessage"}, + "output":{"shape":"DescribeMetadataModelAssessmentsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"Returns a paginated list of metadata model assessments for your account in the current region.
" + }, + "DescribeMetadataModelConversions":{ + "name":"DescribeMetadataModelConversions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelConversionsMessage"}, + "output":{"shape":"DescribeMetadataModelConversionsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"Returns a paginated list of metadata model conversions for a migration project.
" + }, + "DescribeMetadataModelExportsAsScript":{ + "name":"DescribeMetadataModelExportsAsScript", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelExportsAsScriptMessage"}, + "output":{"shape":"DescribeMetadataModelExportsAsScriptResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"Returns a paginated list of metadata model exports.
" + }, + "DescribeMetadataModelExportsToTarget":{ + "name":"DescribeMetadataModelExportsToTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelExportsToTargetMessage"}, + "output":{"shape":"DescribeMetadataModelExportsToTargetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"Returns a paginated list of metadata model exports.
" + }, + "DescribeMetadataModelImports":{ + "name":"DescribeMetadataModelImports", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelImportsMessage"}, + "output":{"shape":"DescribeMetadataModelImportsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"Returns a paginated list of metadata model imports.
" + }, + "DescribeMigrationProjects":{ + "name":"DescribeMigrationProjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMigrationProjectsMessage"}, + "output":{"shape":"DescribeMigrationProjectsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"Returns a paginated list of migration projects for your account in the current region.
" + }, "DescribeOrderableReplicationInstances":{ "name":"DescribeOrderableReplicationInstances", "http":{ @@ -777,6 +1005,19 @@ ], "documentation":"Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted.
Note that the \"last updated\" column the DMS console only indicates the time that DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table.
" }, + "ExportMetadataModelAssessment":{ + "name":"ExportMetadataModelAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportMetadataModelAssessmentMessage"}, + "output":{"shape":"ExportMetadataModelAssessmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"Saves a copy of a database migration assessment report to your Amazon S3 bucket. DMS can save your assessment report as a comma-separated value (CSV) or a PDF file.
" + }, "ImportCertificate":{ "name":"ImportCertificate", "http":{ @@ -805,6 +1046,35 @@ ], "documentation":"Lists all metadata tags attached to an DMS resource, including replication instance, endpoint, subnet group, and migration task. For more information, see Tag data type description.
Modifies the specified schema conversion configuration using the provided parameters.
" + }, + "ModifyDataProvider":{ + "name":"ModifyDataProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDataProviderMessage"}, + "output":{"shape":"ModifyDataProviderResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"Modifies the specified data provider using the provided settings.
You must remove the data provider from all migration projects before you can modify it.
Modifies an existing DMS event notification subscription.
" }, + "ModifyInstanceProfile":{ + "name":"ModifyInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceProfileMessage"}, + "output":{"shape":"ModifyInstanceProfileResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"Modifies the specified instance profile using the provided parameters.
All migration projects associated with the instance profile must be deleted or modified before you can modify the instance profile.
Modifies the specified migration project using the provided parameters.
The migration project must be closed before you can modify it.
Runs large-scale assessment (LSA) analysis on every Fleet Advisor collector in your account.
" }, - "StartRecommendations":{ - "name":"StartRecommendations", + "StartExtensionPackAssociation":{ + "name":"StartExtensionPackAssociation", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"StartRecommendationsRequest"}, + "input":{"shape":"StartExtensionPackAssociationMessage"}, + "output":{"shape":"StartExtensionPackAssociationResponse"}, "errors":[ - {"shape":"InvalidResourceStateFault"}, {"shape":"AccessDeniedFault"}, - {"shape":"ResourceNotFoundFault"} + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} ], - "documentation":"Starts the analysis of your source database to provide recommendations of target engines.
You can create recommendations for multiple source databases using BatchStartRecommendations.
" + "documentation":"Applies the extension pack to your target database. An extension pack is an add-on module that emulates functions present in a source database that are required when converting objects to the target database.
" }, - "StartReplication":{ - "name":"StartReplication", + "StartMetadataModelAssessment":{ + "name":"StartMetadataModelAssessment", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"StartReplicationMessage"}, - "output":{"shape":"StartReplicationResponse"}, + "input":{"shape":"StartMetadataModelAssessmentMessage"}, + "output":{"shape":"StartMetadataModelAssessmentResponse"}, "errors":[ - {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"}, {"shape":"InvalidResourceStateFault"}, - {"shape":"AccessDeniedFault"} + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} ], - "documentation":"For a given DMS Serverless replication configuration, DMS connects to the source endpoint and collects the metadata to analyze the replication workload. Using this metadata, DMS then computes and provisions the required capacity and starts replicating to the target endpoint using the server resources that DMS has provisioned for the DMS Serverless replication.
" + "documentation":"Creates a database migration assessment report by assessing the migration complexity for your source database. A database migration assessment report summarizes all of the schema conversion tasks. It also details the action items for database objects that can't be converted to the database engine of your target database instance.
" }, - "StartReplicationTask":{ - "name":"StartReplicationTask", + "StartMetadataModelConversion":{ + "name":"StartMetadataModelConversion", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"StartReplicationTaskMessage"}, - "output":{"shape":"StartReplicationTaskResponse"}, + "input":{"shape":"StartMetadataModelConversionMessage"}, + "output":{"shape":"StartMetadataModelConversionResponse"}, "errors":[ - {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"}, {"shape":"InvalidResourceStateFault"}, - {"shape":"AccessDeniedFault"} + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} ], - "documentation":"Starts the replication task.
For more information about DMS tasks, see Working with Migration Tasks in the Database Migration Service User Guide.
" + "documentation":"Converts your source database objects to a format compatible with the target database.
" }, - "StartReplicationTaskAssessment":{ - "name":"StartReplicationTaskAssessment", + "StartMetadataModelExportAsScript":{ + "name":"StartMetadataModelExportAsScript", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"StartReplicationTaskAssessmentMessage"}, - "output":{"shape":"StartReplicationTaskAssessmentResponse"}, + "input":{"shape":"StartMetadataModelExportAsScriptMessage"}, + "output":{"shape":"StartMetadataModelExportAsScriptResponse"}, "errors":[ + {"shape":"AccessDeniedFault"}, {"shape":"InvalidResourceStateFault"}, - {"shape":"ResourceNotFoundFault"} + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} ], - "documentation":"Starts the replication task assessment for unsupported data types in the source database.
You can only use this operation for a task if the following conditions are true:
The task must be in the stopped state.
The task must have successful connections to the source and target.
If either of these conditions are not met, an InvalidResourceStateFault error will result.
For information about DMS task assessments, see Creating a task assessment report in the Database Migration Service User Guide.
" + "documentation":"Saves your converted code to a file as a SQL script, and stores this file on your Amazon S3 bucket.
" }, - "StartReplicationTaskAssessmentRun":{ - "name":"StartReplicationTaskAssessmentRun", + "StartMetadataModelExportToTarget":{ + "name":"StartMetadataModelExportToTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMetadataModelExportToTargetMessage"}, + "output":{"shape":"StartMetadataModelExportToTargetResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"Applies converted database objects to your target database.
" + }, + "StartMetadataModelImport":{ + "name":"StartMetadataModelImport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMetadataModelImportMessage"}, + "output":{"shape":"StartMetadataModelImportResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"Loads the metadata for all the dependent database objects of the parent object.
This operation uses your project's Amazon S3 bucket as a metadata cache to improve performance.
" + }, + "StartRecommendations":{ + "name":"StartRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartRecommendationsRequest"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"Starts the analysis of your source database to provide recommendations of target engines.
You can create recommendations for multiple source databases using BatchStartRecommendations.
" + }, + "StartReplication":{ + "name":"StartReplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplicationMessage"}, + "output":{"shape":"StartReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"For a given DMS Serverless replication configuration, DMS connects to the source endpoint and collects the metadata to analyze the replication workload. Using this metadata, DMS then computes and provisions the required capacity and starts replicating to the target endpoint using the server resources that DMS has provisioned for the DMS Serverless replication.
" + }, + "StartReplicationTask":{ + "name":"StartReplicationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplicationTaskMessage"}, + "output":{"shape":"StartReplicationTaskResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"Starts the replication task.
For more information about DMS tasks, see Working with Migration Tasks in the Database Migration Service User Guide.
" + }, + "StartReplicationTaskAssessment":{ + "name":"StartReplicationTaskAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplicationTaskAssessmentMessage"}, + "output":{"shape":"StartReplicationTaskAssessmentResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"Starts the replication task assessment for unsupported data types in the source database.
You can only use this operation for a task if the following conditions are true:
The task must be in the stopped state.
The task must have successful connections to the source and target.
If either of these conditions are not met, an InvalidResourceStateFault error will result.
For information about DMS task assessments, see Creating a task assessment report in the Database Migration Service User Guide.
" + }, + "StartReplicationTaskAssessmentRun":{ + "name":"StartReplicationTaskAssessmentRun", "http":{ "method":"POST", "requestUri":"/" @@ -1253,6 +1678,18 @@ "type":"list", "member":{"shape":"String"} }, + "AssessmentReportType":{ + "type":"string", + "enum":[ + "pdf", + "csv" + ] + }, + "AssessmentReportTypesList":{ + "type":"list", + "member":{"shape":"AssessmentReportType"}, + "min":1 + }, "AuthMechanismValue":{ "type":"string", "enum":[ @@ -1615,6 +2052,44 @@ "type":"list", "member":{"shape":"Connection"} }, + "CreateDataProviderMessage":{ + "type":"structure", + "required":[ + "Engine", + "Settings" + ], + "members":{ + "DataProviderName":{ + "shape":"String", + "documentation":"A user-friendly name for the data provider.
" + }, + "Description":{ + "shape":"String", + "documentation":"A user-friendly description of the data provider.
" + }, + "Engine":{ + "shape":"String", + "documentation":"The type of database engine for the data provider. Valid values include \"aurora\", \"aurora_postgresql\", \"mysql\", \"oracle\", \"postgres\", and \"sqlserver\". A value of \"aurora\" represents Amazon Aurora MySQL-Compatible Edition.
The settings in JSON format for a data provider.
" + }, + "Tags":{ + "shape":"TagList", + "documentation":"One or more tags to be assigned to the data provider.
" + } + } + }, + "CreateDataProviderResponse":{ + "type":"structure", + "members":{ + "DataProvider":{ + "shape":"DataProvider", + "documentation":"The data provider that was created.
"
+      }
+    }
+  },
   "CreateEndpointMessage":{
     "type":"structure",
     "required":[
@@ -1869,6 +2344,107 @@
       }
     }
   },
+    "CreateInstanceProfileMessage":{
+      "type":"structure",
+      "members":{
+        "AvailabilityZone":{
+          "shape":"String",
+          "documentation":"<p>The Availability Zone where the instance profile will be created. The default value is a random, system-chosen Availability Zone in the Amazon Web Services Region where your data provider is created, for example us-east-1d.</p>
The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the connection parameters for the instance profile.
If you don't specify a value for the KmsKeyArn parameter, then DMS uses your default encryption key.
KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region.
" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"Specifies the accessibility options for the instance profile. A value of true represents an instance profile with a public IP address. A value of false represents an instance profile with a private IP address. The default value is true.
One or more tags to be assigned to the instance profile.
" + }, + "NetworkType":{ + "shape":"String", + "documentation":"Specifies the network type for the instance profile. A value of IPV4 represents an instance profile with IPv4 network type and only supports IPv4 addressing. A value of IPV6 represents an instance profile with IPv6 network type and only supports IPv6 addressing. A value of DUAL represents an instance profile with dual network type that supports IPv4 and IPv6 addressing.
A user-friendly name for the instance profile.
" + }, + "Description":{ + "shape":"String", + "documentation":"A user-friendly description of the instance profile.
" + }, + "SubnetGroupIdentifier":{ + "shape":"String", + "documentation":"A subnet group to associate with the instance profile.
" + }, + "VpcSecurityGroups":{ + "shape":"StringList", + "documentation":"Specifies the VPC security group names to be used with the instance profile. The VPC security group must work with the VPC containing the instance profile.
" + } + } + }, + "CreateInstanceProfileResponse":{ + "type":"structure", + "members":{ + "InstanceProfile":{ + "shape":"InstanceProfile", + "documentation":"The instance profile that was created.
" + } + } + }, + "CreateMigrationProjectMessage":{ + "type":"structure", + "required":[ + "SourceDataProviderDescriptors", + "TargetDataProviderDescriptors", + "InstanceProfileIdentifier" + ], + "members":{ + "MigrationProjectName":{ + "shape":"String", + "documentation":"A user-friendly name for the migration project.
" + }, + "SourceDataProviderDescriptors":{ + "shape":"DataProviderDescriptorDefinitionList", + "documentation":"Information about the source data provider, including the name, ARN, and Secrets Manager parameters.
" + }, + "TargetDataProviderDescriptors":{ + "shape":"DataProviderDescriptorDefinitionList", + "documentation":"Information about the target data provider, including the name, ARN, and Amazon Web Services Secrets Manager parameters.
" + }, + "InstanceProfileIdentifier":{ + "shape":"String", + "documentation":"The identifier of the associated instance profile. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.
" + }, + "TransformationRules":{ + "shape":"String", + "documentation":"The settings in JSON format for migration rules. Migration rules make it possible for you to change the object names according to the rules that you specify. For example, you can change an object name to lowercase or uppercase, add or remove a prefix or suffix, or rename objects.
" + }, + "Description":{ + "shape":"String", + "documentation":"A user-friendly description of the migration project.
" + }, + "Tags":{ + "shape":"TagList", + "documentation":"One or more tags to be assigned to the migration project.
" + }, + "SchemaConversionApplicationAttributes":{ + "shape":"SCApplicationAttributes", + "documentation":"The schema conversion application attributes, including the Amazon S3 bucket name and Amazon S3 role ARN.
" + } + } + }, + "CreateMigrationProjectResponse":{ + "type":"structure", + "members":{ + "MigrationProject":{ + "shape":"MigrationProject", + "documentation":"The migration project that was created.
" + } + } + }, "CreateReplicationConfigMessage":{ "type":"structure", "required":[ @@ -2137,6 +2713,100 @@ "parquet" ] }, + "DataProvider":{ + "type":"structure", + "members":{ + "DataProviderName":{ + "shape":"String", + "documentation":"The name of the data provider.
" + }, + "DataProviderArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) string that uniquely identifies the data provider.
" + }, + "DataProviderCreationTime":{ + "shape":"Iso8601DateTime", + "documentation":"The time the data provider was created.
" + }, + "Description":{ + "shape":"String", + "documentation":"A description of the data provider. Descriptions can have up to 31 characters. A description can contain only ASCII letters, digits, and hyphens ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter.
" + }, + "Engine":{ + "shape":"String", + "documentation":"The type of database engine for the data provider. Valid values include \"aurora\", \"aurora_postgresql\", \"mysql\", \"oracle\", \"postgres\", and \"sqlserver\". A value of \"aurora\" represents Amazon Aurora MySQL-Compatible Edition.
The settings in JSON format for a data provider.
" + } + }, + "documentation":"Provides information that defines a data provider.
" + }, + "DataProviderDescriptor":{ + "type":"structure", + "members":{ + "SecretsManagerSecretId":{ + "shape":"String", + "documentation":"The identifier of the Amazon Web Services Secrets Manager Secret used to store access credentials for the data provider.
" + }, + "SecretsManagerAccessRoleArn":{ + "shape":"String", + "documentation":"The ARN of the role used to access Amazon Web Services Secrets Manager.
" + }, + "DataProviderName":{ + "shape":"String", + "documentation":"The user-friendly name of the data provider.
" + }, + "DataProviderArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) of the data provider.
" + } + }, + "documentation":"Information about a data provider.
" + }, + "DataProviderDescriptorDefinition":{ + "type":"structure", + "required":["DataProviderIdentifier"], + "members":{ + "DataProviderIdentifier":{ + "shape":"String", + "documentation":"The name or Amazon Resource Name (ARN) of the data provider.
" + }, + "SecretsManagerSecretId":{ + "shape":"String", + "documentation":"The identifier of the Amazon Web Services Secrets Manager Secret used to store access credentials for the data provider.
" + }, + "SecretsManagerAccessRoleArn":{ + "shape":"String", + "documentation":"The ARN of the role used to access Amazon Web Services Secrets Manager.
" + } + }, + "documentation":"Information about a data provider.
" + }, + "DataProviderDescriptorDefinitionList":{ + "type":"list", + "member":{"shape":"DataProviderDescriptorDefinition"} + }, + "DataProviderDescriptorList":{ + "type":"list", + "member":{"shape":"DataProviderDescriptor"} + }, + "DataProviderList":{ + "type":"list", + "member":{"shape":"DataProvider"} + }, + "DataProviderSettings":{ + "type":"structure", + "members":{ + "PostgreSqlSettings":{"shape":"PostgreSqlDataProviderSettings"}, + "MySqlSettings":{"shape":"MySqlDataProviderSettings"}, + "OracleSettings":{"shape":"OracleDataProviderSettings"}, + "MicrosoftSqlServerSettings":{"shape":"MicrosoftSqlServerDataProviderSettings"} + }, + "documentation":"Provides information that defines a data provider.
", + "union":true + }, "DatabaseInstanceSoftwareDetailsResponse":{ "type":"structure", "members":{ @@ -2257,6 +2927,16 @@ "DDMMYYYY" ] }, + "DefaultErrorDetails":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "documentation":"The error message.
" + } + }, + "documentation":"Provides error information about a schema conversion operation.
" + }, "DeleteCertificateMessage":{ "type":"structure", "required":["CertificateArn"], @@ -2314,6 +2994,25 @@ }, "documentation":"" }, + "DeleteDataProviderMessage":{ + "type":"structure", + "required":["DataProviderIdentifier"], + "members":{ + "DataProviderIdentifier":{ + "shape":"String", + "documentation":"The identifier of the data provider to delete.
" + } + } + }, + "DeleteDataProviderResponse":{ + "type":"structure", + "members":{ + "DataProvider":{ + "shape":"DataProvider", + "documentation":"The data provider that was deleted.
" + } + } + }, "DeleteEndpointMessage":{ "type":"structure", "required":["EndpointArn"], @@ -2375,15 +3074,53 @@ } } }, - "DeleteReplicationConfigMessage":{ + "DeleteInstanceProfileMessage":{ "type":"structure", - "required":["ReplicationConfigArn"], + "required":["InstanceProfileIdentifier"], "members":{ - "ReplicationConfigArn":{ + "InstanceProfileIdentifier":{ "shape":"String", - "documentation":"The replication config to delete.
" + "documentation":"The identifier of the instance profile to delete.
" } - }, + } + }, + "DeleteInstanceProfileResponse":{ + "type":"structure", + "members":{ + "InstanceProfile":{ + "shape":"InstanceProfile", + "documentation":"The instance profile that was deleted.
" + } + } + }, + "DeleteMigrationProjectMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The name or Amazon Resource Name (ARN) of the migration project to delete.
" + } + } + }, + "DeleteMigrationProjectResponse":{ + "type":"structure", + "members":{ + "MigrationProject":{ + "shape":"MigrationProject", + "documentation":"The migration project that was deleted.
" + } + } + }, + "DeleteReplicationConfigMessage":{ + "type":"structure", + "required":["ReplicationConfigArn"], + "members":{ + "ReplicationConfigArn":{ + "shape":"String", + "documentation":"The replication config to delete.
" + } + }, "documentation":"" }, "DeleteReplicationConfigResponse":{ @@ -2606,6 +3343,59 @@ }, "documentation":"" }, + "DescribeConversionConfigurationMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The name or Amazon Resource Name (ARN) for the schema conversion project to describe.
" + } + } + }, + "DescribeConversionConfigurationResponse":{ + "type":"structure", + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The name or Amazon Resource Name (ARN) for the schema conversion project.
" + }, + "ConversionConfiguration":{ + "shape":"String", + "documentation":"The configuration parameters for the schema conversion project.
" + } + } + }, + "DescribeDataProvidersMessage":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"Filters applied to the data providers described in the form of key-value pairs.
" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
A description of data providers.
" + } + } + }, "DescribeEndpointSettingsMessage":{ "type":"structure", "required":["EngineName"], @@ -2834,158 +3624,428 @@ "members":{ "Marker":{ "shape":"String", - "documentation":" An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
The events described.
" + } + }, + "documentation":"" + }, + "DescribeExtensionPackAssociationsMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The name or Amazon Resource Name (ARN) for the migration project.
" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"Filters applied to the extension pack associations described in the form of key-value pairs.
" + }, + "Marker":{ + "shape":"String", + "documentation":"Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
A paginated list of extension pack associations for the specified migration project.
" + } + } + }, + "DescribeFleetAdvisorCollectorsRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"If you specify any of the following filters, the output includes information for only those collectors that meet the filter criteria:
collector-referenced-id – The ID of the collector agent, for example d4610ac5-e323-4ad9-bc50-eaf7249dfe9d.
collector-name – The name of the collector agent.
An example is: describe-fleet-advisor-collectors --filter Name=\"collector-referenced-id\",Values=\"d4610ac5-e323-4ad9-bc50-eaf7249dfe9d\"
Sets the maximum number of records returned in the response.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Provides descriptions of the Fleet Advisor collectors, including the collectors' name and ID, and the latest inventory data.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
If you specify any of the following filters, the output includes information for only those databases that meet the filter criteria:
database-id – The ID of the database.
database-name – The name of the database.
database-engine – The name of the database engine.
server-ip-address – The IP address of the database server.
database-ip-address – The IP address of the database.
collector-name – The name of the associated Fleet Advisor collector.
An example is: describe-fleet-advisor-databases --filter Name=\"database-id\",Values=\"45\"
Sets the maximum number of records returned in the response.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Provides descriptions of the Fleet Advisor collector databases, including the database's collector, ID, and name.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Sets the maximum number of records returned in the response.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
A list of FleetAdvisorLsaAnalysisResponse objects.
If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
If you specify any of the following filters, the output includes information for only those schema objects that meet the filter criteria:
schema-id – The ID of the schema, for example d4610ac5-e323-4ad9-bc50-eaf7249dfe9d.
Example: describe-fleet-advisor-schema-object-summary --filter Name=\"schema-id\",Values=\"50\"
Sets the maximum number of records returned in the response.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
A collection of FleetAdvisorSchemaObjectResponse objects.
If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
If you specify any of the following filters, the output includes information for only those schemas that meet the filter criteria:
complexity – The schema's complexity, for example Simple.
database-id – The ID of the schema's database.
database-ip-address – The IP address of the schema's database.
database-name – The name of the schema's database.
database-engine – The name of the schema database's engine.
original-schema-name – The name of the schema's database's main schema.
schema-id – The ID of the schema, for example 15.
schema-name – The name of the schema.
server-ip-address – The IP address of the schema database's server.
An example is: describe-fleet-advisor-schemas --filter Name=\"schema-id\",Values=\"50\"
Sets the maximum number of records returned in the response.
" + }, + "NextToken":{ + "shape":"String", + "documentation":"If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
A collection of SchemaResponse objects.
If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Filters applied to the instance profiles described in the form of key-value pairs.
" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
A description of instance profiles.
" + } + } + }, + "DescribeMetadataModelAssessmentsMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The name or Amazon Resource Name (ARN) of the migration project.
" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"Filters applied to the metadata model assessments described in the form of key-value pairs.
" + }, + "Marker":{ + "shape":"String", + "documentation":"Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
The events described.
" + "Requests":{ + "shape":"SchemaConversionRequestList", + "documentation":"A paginated list of metadata model assessments for the specified migration project.
" } - }, - "documentation":"" + } }, - "DescribeFleetAdvisorCollectorsRequest":{ + "DescribeMetadataModelConversionsMessage":{ "type":"structure", + "required":["MigrationProjectIdentifier"], "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, "Filters":{ "shape":"FilterList", - "documentation":"If you specify any of the following filters, the output includes information for only those collectors that meet the filter criteria:
collector-referenced-id – The ID of the collector agent, for example d4610ac5-e323-4ad9-bc50-eaf7249dfe9d.
collector-name – The name of the collector agent.
An example is: describe-fleet-advisor-collectors --filter Name=\"collector-referenced-id\",Values=\"d4610ac5-e323-4ad9-bc50-eaf7249dfe9d\"
Filters applied to the metadata model conversions described in the form of key-value pairs.
" + }, + "Marker":{ + "shape":"String", + "documentation":"Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
Sets the maximum number of records returned in the response.
" - }, - "NextToken":{ - "shape":"String", - "documentation":"If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.
Provides descriptions of the Fleet Advisor collectors, including the collectors' name and ID, and the latest inventory data.
" - }, - "NextToken":{ + "Marker":{ "shape":"String", - "documentation":"If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
A paginated list of metadata model conversions.
" } } }, - "DescribeFleetAdvisorDatabasesRequest":{ + "DescribeMetadataModelExportsAsScriptMessage":{ "type":"structure", + "required":["MigrationProjectIdentifier"], "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, "Filters":{ "shape":"FilterList", - "documentation":"If you specify any of the following filters, the output includes information for only those databases that meet the filter criteria:
database-id – The ID of the database.
database-name – The name of the database.
database-engine – The name of the database engine.
server-ip-address – The IP address of the database server.
database-ip-address – The IP address of the database.
collector-name – The name of the associated Fleet Advisor collector.
An example is: describe-fleet-advisor-databases --filter Name=\"database-id\",Values=\"45\"
Filters applied to the metadata model exports described in the form of key-value pairs.
" + }, + "Marker":{ + "shape":"String", + "documentation":"Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
Sets the maximum number of records returned in the response.
" - }, - "NextToken":{ - "shape":"String", - "documentation":"If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.
Provides descriptions of the Fleet Advisor collector databases, including the database's collector, ID, and name.
" - }, - "NextToken":{ + "Marker":{ "shape":"String", - "documentation":"If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
A paginated list of metadata model exports.
" } } }, - "DescribeFleetAdvisorLsaAnalysisRequest":{ + "DescribeMetadataModelExportsToTargetMessage":{ "type":"structure", + "required":["MigrationProjectIdentifier"], "members":{ - "MaxRecords":{ - "shape":"IntegerOptional", - "documentation":"Sets the maximum number of records returned in the response.
" + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" }, - "NextToken":{ + "Filters":{ + "shape":"FilterList", + "documentation":"Filters applied to the metadata model exports described in the form of key-value pairs.
" + }, + "Marker":{ "shape":"String", - "documentation":"If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.
A list of FleetAdvisorLsaAnalysisResponse objects.
If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
A paginated list of metadata model exports.
" } } }, - "DescribeFleetAdvisorSchemaObjectSummaryRequest":{ + "DescribeMetadataModelImportsMessage":{ "type":"structure", + "required":["MigrationProjectIdentifier"], "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, "Filters":{ "shape":"FilterList", - "documentation":"If you specify any of the following filters, the output includes information for only those schema objects that meet the filter criteria:
schema-id – The ID of the schema, for example d4610ac5-e323-4ad9-bc50-eaf7249dfe9d.
Example: describe-fleet-advisor-schema-object-summary --filter Name=\"schema-id\",Values=\"50\"
Filters applied to the metadata model imports described in the form of key-value pairs.
" + }, + "Marker":{ + "shape":"String", + "documentation":"Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
Sets the maximum number of records returned in the response.
" - }, - "NextToken":{ - "shape":"String", - "documentation":"If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
A paginated list of metadata model imports.
" } } }, - "DescribeFleetAdvisorSchemaObjectSummaryResponse":{ + "DescribeMetadataModelImportsResponse":{ "type":"structure", "members":{ - "FleetAdvisorSchemaObjects":{ - "shape":"FleetAdvisorSchemaObjectList", - "documentation":"A collection of FleetAdvisorSchemaObjectResponse objects.
If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
A paginated list of metadata model imports.
" } } }, - "DescribeFleetAdvisorSchemasRequest":{ + "DescribeMigrationProjectsMessage":{ "type":"structure", "members":{ "Filters":{ "shape":"FilterList", - "documentation":"If you specify any of the following filters, the output includes information for only those schemas that meet the filter criteria:
complexity – The schema's complexity, for example Simple.
database-id – The ID of the schema's database.
database-ip-address – The IP address of the schema's database.
database-name – The name of the schema's database.
database-engine – The name of the schema database's engine.
original-schema-name – The name of the schema's database's main schema.
schema-id – The ID of the schema, for example 15.
schema-name – The name of the schema.
server-ip-address – The IP address of the schema database's server.
An example is: describe-fleet-advisor-schemas --filter Name=\"schema-id\",Values=\"50\"
Filters applied to the migration projects described in the form of key-value pairs.
" }, "MaxRecords":{ "shape":"IntegerOptional", - "documentation":"Sets the maximum number of records returned in the response.
" + "documentation":"The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.
If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
A collection of SchemaResponse objects.
If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.
Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.
A description of migration projects.
" } } }, @@ -3938,6 +4998,17 @@ "type":"list", "member":{"shape":"EngineVersion"} }, + "ErrorDetails":{ + "type":"structure", + "members":{ + "defaultErrorDetails":{ + "shape":"DefaultErrorDetails", + "documentation":"Error information about a project.
" + } + }, + "documentation":"Provides error information about a project.
", + "union":true + }, "Event":{ "type":"structure", "members":{ @@ -4041,6 +5112,72 @@ "type":"list", "member":{"shape":"String"} }, + "ExportMetadataModelAssessmentMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"A value that specifies the database objects to assess.
" + }, + "FileName":{ + "shape":"String", + "documentation":"The name of the assessment file to create in your Amazon S3 bucket.
" + }, + "AssessmentReportTypes":{ + "shape":"AssessmentReportTypesList", + "documentation":"The file format of the assessment file.
" + } + } + }, + "ExportMetadataModelAssessmentResponse":{ + "type":"structure", + "members":{ + "PdfReport":{ + "shape":"ExportMetadataModelAssessmentResultEntry", + "documentation":"The Amazon S3 details for an assessment exported in PDF format.
" + }, + "CsvReport":{ + "shape":"ExportMetadataModelAssessmentResultEntry", + "documentation":"The Amazon S3 details for an assessment exported in CSV format.
" + } + } + }, + "ExportMetadataModelAssessmentResultEntry":{ + "type":"structure", + "members":{ + "S3ObjectKey":{ + "shape":"String", + "documentation":"The object key for the object containing the exported metadata model assessment.
" + }, + "ObjectURL":{ + "shape":"String", + "documentation":"The URL for the object containing the exported metadata model assessment.
" + } + }, + "documentation":"Provides information about an exported metadata model assessment.
" + }, + "ExportSqlDetails":{ + "type":"structure", + "members":{ + "S3ObjectKey":{ + "shape":"String", + "documentation":"The Amazon S3 object key for the object containing the exported metadata model assessment.
" + }, + "ObjectURL":{ + "shape":"String", + "documentation":"The URL for the object containing the exported metadata model assessment.
" + } + }, + "documentation":"Provides information about a metadata model assessment exported to SQL.
" + }, "Filter":{ "type":"structure", "required":[ @@ -4266,6 +5403,56 @@ "type":"list", "member":{"shape":"String"} }, + "InstanceProfile":{ + "type":"structure", + "members":{ + "InstanceProfileArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) string that uniquely identifies the instance profile.
" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"The Availability Zone where the instance profile runs.
" + }, + "KmsKeyArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the connection parameters for the instance profile.
If you don't specify a value for the KmsKeyArn parameter, then DMS uses your default encryption key.
KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region.
" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"Specifies the accessibility options for the instance profile. A value of true represents an instance profile with a public IP address. A value of false represents an instance profile with a private IP address. The default value is true.
Specifies the network type for the instance profile. A value of IPV4 represents an instance profile with IPv4 network type and only supports IPv4 addressing. A value of IPV6 represents an instance profile with IPv6 network type and only supports IPv6 addressing. A value of DUAL represents an instance profile with dual network type that supports IPv4 and IPv6 addressing.
The user-friendly name for the instance profile.
" + }, + "Description":{ + "shape":"String", + "documentation":"A description of the instance profile. Descriptions can have up to 31 characters. A description can contain only ASCII letters, digits, and hyphens ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter.
" + }, + "InstanceProfileCreationTime":{ + "shape":"Iso8601DateTime", + "documentation":"The time the instance profile was created.
" + }, + "SubnetGroupIdentifier":{ + "shape":"String", + "documentation":"The identifier of the subnet group that is associated with the instance profile.
" + }, + "VpcSecurityGroups":{ + "shape":"StringList", + "documentation":"The VPC security groups that are used with the instance profile. The VPC security group must work with the VPC containing the instance profile.
" + } + }, + "documentation":"Provides information that defines an instance profile.
" + }, + "InstanceProfileList":{ + "type":"list", + "member":{"shape":"InstanceProfile"} + }, "InsufficientResourceCapacityFault":{ "type":"structure", "members":{ @@ -4335,6 +5522,10 @@ }, "documentation":"Describes a Fleet Advisor collector inventory.
" }, + "Iso8601DateTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "KMSAccessDeniedFault":{ "type":"structure", "members":{ @@ -4702,6 +5893,82 @@ }, "documentation":"Provides information that defines a Microsoft SQL Server endpoint.
" }, + "MicrosoftSqlServerDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{ + "shape":"String", + "documentation":"The name of the Microsoft SQL Server server.
" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"The port value for the Microsoft SQL Server data provider.
" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"The database name on the Microsoft SQL Server data provider.
" + }, + "SslMode":{ + "shape":"DmsSslModeValue", + "documentation":"The SSL mode used to connect to the Microsoft SQL Server data provider. The default value is none.
The Amazon Resource Name (ARN) of the certificate used for SSL connection.
" + } + }, + "documentation":"Provides information that defines a Microsoft SQL Server data provider.
" + }, + "MigrationProject":{ + "type":"structure", + "members":{ + "MigrationProjectName":{ + "shape":"String", + "documentation":"The name of the migration project.
" + }, + "MigrationProjectArn":{ + "shape":"String", + "documentation":"The ARN string that uniquely identifies the migration project.
" + }, + "MigrationProjectCreationTime":{ + "shape":"Iso8601DateTime", + "documentation":"The time when the migration project was created.
" + }, + "SourceDataProviderDescriptors":{ + "shape":"DataProviderDescriptorList", + "documentation":"Information about the source data provider, including the name or ARN, and Secrets Manager parameters.
" + }, + "TargetDataProviderDescriptors":{ + "shape":"DataProviderDescriptorList", + "documentation":"Information about the target data provider, including the name or ARN, and Secrets Manager parameters.
" + }, + "InstanceProfileArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) of the instance profile for your migration project.
" + }, + "InstanceProfileName":{ + "shape":"String", + "documentation":"The name of the associated instance profile.
" + }, + "TransformationRules":{ + "shape":"String", + "documentation":"The settings in JSON format for migration rules. Migration rules make it possible for you to change the object names according to the rules that you specify. For example, you can change an object name to lowercase or uppercase, add or remove a prefix or suffix, or rename objects.
" + }, + "Description":{ + "shape":"String", + "documentation":"A user-friendly description of the migration project.
" + }, + "SchemaConversionApplicationAttributes":{ + "shape":"SCApplicationAttributes", + "documentation":"The schema conversion application attributes, including the Amazon S3 bucket name and Amazon S3 role ARN.
" + } + }, + "documentation":"Provides information that defines a migration project.
" + }, + "MigrationProjectList":{ + "type":"list", + "member":{"shape":"MigrationProject"} + }, "MigrationTypeValue":{ "type":"string", "enum":[ @@ -4710,6 +5977,71 @@ "full-load-and-cdc" ] }, + "ModifyConversionConfigurationMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "ConversionConfiguration" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, + "ConversionConfiguration":{ + "shape":"String", + "documentation":"The new conversion configuration.
" + } + } + }, + "ModifyConversionConfigurationResponse":{ + "type":"structure", + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The name or Amazon Resource Name (ARN) of the modified configuration.
" + } + } + }, + "ModifyDataProviderMessage":{ + "type":"structure", + "required":["DataProviderIdentifier"], + "members":{ + "DataProviderIdentifier":{ + "shape":"String", + "documentation":"The identifier of the data provider. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.
" + }, + "DataProviderName":{ + "shape":"String", + "documentation":"The name of the data provider.
" + }, + "Description":{ + "shape":"String", + "documentation":"A user-friendly description of the data provider.
" + }, + "Engine":{ + "shape":"String", + "documentation":"The type of database engine for the data provider. Valid values include \"aurora\", \"aurora_postgresql\", \"mysql\", \"oracle\", \"postgres\", and \"sqlserver\". A value of \"aurora\" represents Amazon Aurora MySQL-Compatible Edition.
If this attribute is Y, the current call to ModifyDataProvider replaces all existing data provider settings with the exact settings that you specify in this call. If this attribute is N, the current call to ModifyDataProvider does two things:
It replaces any data provider settings that already exist with new values, for settings with the same names.
It creates new data provider settings that you specify in the call, for settings with different names.
The settings in JSON format for a data provider.
" + } + } + }, + "ModifyDataProviderResponse":{ + "type":"structure", + "members":{ + "DataProvider":{ + "shape":"DataProvider", + "documentation":"The data provider that was modified.
" + } + } + }, "ModifyEndpointMessage":{ "type":"structure", "required":["EndpointArn"], @@ -4897,6 +6229,104 @@ }, "documentation":"" }, + "ModifyInstanceProfileMessage":{ + "type":"structure", + "required":["InstanceProfileIdentifier"], + "members":{ + "InstanceProfileIdentifier":{ + "shape":"String", + "documentation":"The identifier of the instance profile. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.
" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"The Availability Zone where the instance profile runs.
" + }, + "KmsKeyArn":{ + "shape":"String", + "documentation":"The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the connection parameters for the instance profile.
If you don't specify a value for the KmsKeyArn parameter, then DMS uses your default encryption key.
KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region.
" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"Specifies the accessibility options for the instance profile. A value of true represents an instance profile with a public IP address. A value of false represents an instance profile with a private IP address. The default value is true.
Specifies the network type for the instance profile. A value of IPV4 represents an instance profile with IPv4 network type and only supports IPv4 addressing. A value of IPV6 represents an instance profile with IPv6 network type and only supports IPv6 addressing. A value of DUAL represents an instance profile with dual network type that supports IPv4 and IPv6 addressing.
A user-friendly name for the instance profile.
" + }, + "Description":{ + "shape":"String", + "documentation":"A user-friendly description for the instance profile.
" + }, + "SubnetGroupIdentifier":{ + "shape":"String", + "documentation":"A subnet group to associate with the instance profile.
" + }, + "VpcSecurityGroups":{ + "shape":"StringList", + "documentation":"Specifies the VPC security groups to be used with the instance profile. The VPC security group must work with the VPC containing the instance profile.
" + } + } + }, + "ModifyInstanceProfileResponse":{ + "type":"structure", + "members":{ + "InstanceProfile":{ + "shape":"InstanceProfile", + "documentation":"The instance profile that was modified.
" + } + } + }, + "ModifyMigrationProjectMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The identifier of the migration project. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.
" + }, + "MigrationProjectName":{ + "shape":"String", + "documentation":"A user-friendly name for the migration project.
" + }, + "SourceDataProviderDescriptors":{ + "shape":"DataProviderDescriptorDefinitionList", + "documentation":"Information about the source data provider, including the name, ARN, and Amazon Web Services Secrets Manager parameters.
" + }, + "TargetDataProviderDescriptors":{ + "shape":"DataProviderDescriptorDefinitionList", + "documentation":"Information about the target data provider, including the name, ARN, and Amazon Web Services Secrets Manager parameters.
" + }, + "InstanceProfileIdentifier":{ + "shape":"String", + "documentation":"The name or Amazon Resource Name (ARN) for the instance profile.
" + }, + "TransformationRules":{ + "shape":"String", + "documentation":"The settings in JSON format for migration rules. Migration rules make it possible for you to change the object names according to the rules that you specify. For example, you can change an object name to lowercase or uppercase, add or remove a prefix or suffix, or rename objects.
" + }, + "Description":{ + "shape":"String", + "documentation":"A user-friendly description of the migration project.
" + }, + "SchemaConversionApplicationAttributes":{ + "shape":"SCApplicationAttributes", + "documentation":"The schema conversion application attributes, including the Amazon S3 bucket name and Amazon S3 role ARN.
" + } + } + }, + "ModifyMigrationProjectResponse":{ + "type":"structure", + "members":{ + "MigrationProject":{ + "shape":"MigrationProject", + "documentation":"The migration project that was modified.
" + } + } + }, "ModifyReplicationConfigMessage":{ "type":"structure", "required":["ReplicationConfigArn"], @@ -5259,6 +6689,28 @@ }, "documentation":"Provides information that defines a MySQL endpoint.
" }, + "MySqlDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{ + "shape":"String", + "documentation":"The name of the MySQL server.
" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"The port value for the MySQL data provider.
" + }, + "SslMode":{ + "shape":"DmsSslModeValue", + "documentation":"The SSL mode used to connect to the MySQL data provider. The default value is none.
The Amazon Resource Name (ARN) of the certificate used for SSL connection.
" + } + }, + "documentation":"Provides information that defines a MySQL data provider.
" + }, "NeptuneSettings":{ "type":"structure", "required":[ @@ -5304,6 +6756,52 @@ "one" ] }, + "OracleDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{ + "shape":"String", + "documentation":"The name of the Oracle server.
" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"The port value for the Oracle data provider.
" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"The database name on the Oracle data provider.
" + }, + "SslMode":{ + "shape":"DmsSslModeValue", + "documentation":"The SSL mode used to connect to the Oracle data provider. The default value is none.
The Amazon Resource Name (ARN) of the certificate used for SSL connection.
" + }, + "AsmServer":{ + "shape":"String", + "documentation":"The address of your Oracle Automatic Storage Management (ASM) server. You can set this value from the asm_server value. You set asm_server as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see Configuration for change data capture (CDC) on an Oracle source database.
The identifier of the secret in Secrets Manager that contains the Oracle ASM connection details.
Required only if your data provider uses the Oracle ASM server.
" + }, + "SecretsManagerOracleAsmAccessRoleArn":{ + "shape":"String", + "documentation":"The ARN of the IAM role that provides access to the secret in Secrets Manager that contains the Oracle ASM connection details.
" + }, + "SecretsManagerSecurityDbEncryptionSecretId":{ + "shape":"String", + "documentation":"The identifier of the secret in Secrets Manager that contains the transparent data encryption (TDE) password. DMS requires this password to access Oracle redo logs encrypted by TDE using Binary Reader.
" + }, + "SecretsManagerSecurityDbEncryptionAccessRoleArn":{ + "shape":"String", + "documentation":"The ARN of the IAM role that provides access to the secret in Secrets Manager that contains the TDE password.
" + } + }, + "documentation":"Provides information that defines an Oracle data provider.
" + }, "OracleSettings":{ "type":"structure", "members":{ @@ -5528,6 +7026,13 @@ "type":"list", "member":{"shape":"OrderableReplicationInstance"} }, + "OriginTypeValue":{ + "type":"string", + "enum":[ + "SOURCE", + "TARGET" + ] + }, "ParquetVersionValue":{ "type":"string", "enum":[ @@ -5683,6 +7188,32 @@ }, "documentation":"Provides information that defines a PostgreSQL endpoint.
" }, + "PostgreSqlDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{ + "shape":"String", + "documentation":"The name of the PostgreSQL server.
" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"The port value for the PostgreSQL data provider.
" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"The database name on the PostgreSQL data provider.
" + }, + "SslMode":{ + "shape":"DmsSslModeValue", + "documentation":"The SSL mode used to connect to the PostgreSQL data provider. The default value is none.
The Amazon Resource Name (ARN) of the certificate used for SSL connection.
" + } + }, + "documentation":"Provides information that defines a PostgreSQL data provider.
" + }, "ProvisionData":{ "type":"structure", "members":{ @@ -7175,6 +8706,20 @@ }, "documentation":"Settings for exporting data to Amazon S3.
" }, + "SCApplicationAttributes":{ + "type":"structure", + "members":{ + "S3BucketPath":{ + "shape":"String", + "documentation":"The path for the Amazon S3 bucket that the application uses for exporting assessment reports.
" + }, + "S3BucketRoleArn":{ + "shape":"String", + "documentation":"The ARN for the role the application uses to access its Amazon S3 bucket.
" + } + }, + "documentation":"Provides information that defines a schema conversion application.
" + }, "SNSInvalidTopicFault":{ "type":"structure", "members":{ @@ -7205,6 +8750,30 @@ "shared-automatic-truncation" ] }, + "SchemaConversionRequest":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"String", + "documentation":"The schema conversion action status.
" + }, + "RequestIdentifier":{ + "shape":"String", + "documentation":"The identifier for the schema conversion action.
" + }, + "MigrationProjectArn":{ + "shape":"String", + "documentation":"The migration project ARN.
" + }, + "Error":{"shape":"ErrorDetails"}, + "ExportSqlDetails":{"shape":"ExportSqlDetails"} + }, + "documentation":"Provides information about a schema conversion action.
" + }, + "SchemaConversionRequestList":{ + "type":"list", + "member":{"shape":"SchemaConversionRequest"} + }, "SchemaList":{ "type":"list", "member":{"shape":"String"} @@ -7311,6 +8880,177 @@ "ssl-encryption" ] }, + "StartExtensionPackAssociationMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + } + } + }, + "StartExtensionPackAssociationResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"The identifier for the request operation.
" + } + } + }, + "StartMetadataModelAssessmentMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"A value that specifies the database objects to assess.
" + } + } + }, + "StartMetadataModelAssessmentResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"The identifier for the assessment operation.
" + } + } + }, + "StartMetadataModelConversionMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"A value that specifies the database objects to convert.
" + } + } + }, + "StartMetadataModelConversionResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"The identifier for the conversion operation.
" + } + } + }, + "StartMetadataModelExportAsScriptMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules", + "Origin" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"A value that specifies the database objects to export.
" + }, + "Origin":{ + "shape":"OriginTypeValue", + "documentation":"Whether to export the metadata model from the source or the target.
" + }, + "FileName":{ + "shape":"String", + "documentation":"The name of the model file to create in the Amazon S3 bucket.
" + } + } + }, + "StartMetadataModelExportAsScriptResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"The identifier for the export operation.
" + } + } + }, + "StartMetadataModelExportToTargetMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"A value that specifies the database objects to export.
" + }, + "OverwriteExtensionPack":{ + "shape":"BooleanOptional", + "documentation":"Whether to overwrite the migration project extension pack. An extension pack is an add-on module that emulates functions present in a source database that are required when converting objects to the target database.
" + } + } + }, + "StartMetadataModelExportToTargetResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"The identifier for the export operation.
" + } + } + }, + "StartMetadataModelImportMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules", + "Origin" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"The migration project name or Amazon Resource Name (ARN).
" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"A value that specifies the database objects to import.
" + }, + "Origin":{ + "shape":"OriginTypeValue", + "documentation":"Whether to load metadata to the source or target database.
" + }, + "Refresh":{ + "shape":"Boolean", + "documentation":"If true, DMS loads metadata for the specified objects from the source database.
The identifier for the import operation.
" + } + } + }, "StartRecommendationsRequest":{ "type":"structure", "required":[ From 6ac528330699563faafc82808bdadca752ab7830 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Thu, 3 Aug 2023 22:27:54 +0000 Subject: [PATCH 106/270] AWS Cloud9 Update: Updated the deprecation date for Amazon Linux. Doc only update. --- .../feature-AWSCloud9-7dc3a16.json | 6 + .../codegen-resources/endpoint-rule-set.json | 392 ++++---- .../codegen-resources/endpoint-tests.json | 869 ++++-------------- .../codegen-resources/service-2.json | 2 +- 4 files changed, 379 insertions(+), 890 deletions(-) create mode 100644 .changes/next-release/feature-AWSCloud9-7dc3a16.json diff --git a/.changes/next-release/feature-AWSCloud9-7dc3a16.json b/.changes/next-release/feature-AWSCloud9-7dc3a16.json new file mode 100644 index 000000000000..aa35340e5228 --- /dev/null +++ b/.changes/next-release/feature-AWSCloud9-7dc3a16.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS Cloud9", + "contributor": "", + "description": "Updated the deprecation date for Amazon Linux. Doc only update." 
+} diff --git a/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json index 7f41719e6ef3..fbaea21794bd 100644 --- a/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - 
"fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloud9-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], 
+ "endpoint": { + "url": "https://cloud9-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloud9-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloud9.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://cloud9-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cloud9.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] 
- }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloud9.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://cloud9.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/cloud9/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloud9/src/main/resources/codegen-resources/endpoint-tests.json index 247e617ca0f7..b28ec45a62fe 100644 --- a/services/cloud9/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloud9/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,1099 +1,533 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - 
"documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - 
"endpoint": { - "url": "https://cloud9-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-west-1.api.aws" - 
} - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For 
region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://cloud9.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": 
true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.eu-west-2.amazonaws.com" + "url": "https://cloud9.af-south-1.amazonaws.com" } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "Region": "eu-west-2", "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.eu-west-1.api.aws" + "url": "https://cloud9.ap-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.eu-west-1.amazonaws.com" + "url": "https://cloud9.ap-northeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "eu-west-1", + "Region": "ap-northeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.eu-west-1.api.aws" + "url": "https://cloud9.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://cloud9.eu-west-1.amazonaws.com" + "url": "https://cloud9.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "eu-west-1", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-3.api.aws" + "url": "https://cloud9.ap-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-3.amazonaws.com" + "url": "https://cloud9.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", + "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-3.api.aws" + "url": "https://cloud9.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-3.amazonaws.com" + "url": "https://cloud9.ca-central-1.amazonaws.com" } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "Region": 
"ap-northeast-3", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-2.api.aws" + "url": "https://cloud9.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-2.amazonaws.com" + "url": "https://cloud9.eu-north-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", + "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-2.api.aws" + "url": "https://cloud9.eu-south-1.amazonaws.com" } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-2.amazonaws.com" + "url": "https://cloud9.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "Region": "ap-northeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-1.api.aws" + "url": "https://cloud9.eu-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-1.amazonaws.com" + "url": "https://cloud9.eu-west-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", + "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-1.api.aws" + "url": "https://cloud9.me-south-1.amazonaws.com" } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-1.amazonaws.com" + "url": "https://cloud9.sa-east-1.amazonaws.com" } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "Region": "ap-northeast-1", "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.me-south-1.api.aws" + "url": "https://cloud9.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "me-south-1", - 
"UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.me-south-1.amazonaws.com" + "url": "https://cloud9.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "me-south-1", + "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.me-south-1.api.aws" + "url": "https://cloud9.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.me-south-1.amazonaws.com" + "url": "https://cloud9.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "me-south-1", "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.sa-east-1.api.aws" + "url": "https://cloud9-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "sa-east-1", "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://cloud9-fips.sa-east-1.amazonaws.com" + "url": "https://cloud9-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "sa-east-1", "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9.sa-east-1.api.aws" + "url": "https://cloud9.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "sa-east-1", "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-east-1.api.aws" + "url": "https://cloud9-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": true } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-east-1.amazonaws.com" + "url": "https://cloud9-fips.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-east-1.api.aws" + "url": 
"https://cloud9.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-east-1", "UseDualStack": true } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-east-1.amazonaws.com" + "url": "https://cloud9.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-southeast-1.api.aws" + "url": "https://cloud9-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-southeast-1", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-southeast-1.amazonaws.com" + "url": "https://cloud9-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-southeast-1.api.aws" + "url": "https://cloud9.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For 
region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-southeast-1.amazonaws.com" + "url": "https://cloud9.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloud9-fips.ap-southeast-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "ap-southeast-2", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-southeast-2.amazonaws.com" + "url": "https://cloud9-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloud9.ap-southeast-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-southeast-2.amazonaws.com" + "url": 
"https://cloud9.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.us-east-1.amazonaws.com" + "url": "https://cloud9-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloud9.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.us-east-1.amazonaws.com" + "url": "https://cloud9.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://cloud9.us-east-2.amazonaws.com" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": false + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -1101,7 +535,6 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1112,8 +545,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1124,11 +557,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, 
"UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cloud9/src/main/resources/codegen-resources/service-2.json b/services/cloud9/src/main/resources/codegen-resources/service-2.json index 7e9cca54ffbf..2d2f489783d0 100644 --- a/services/cloud9/src/main/resources/codegen-resources/service-2.json +++ b/services/cloud9/src/main/resources/codegen-resources/service-2.json @@ -330,7 +330,7 @@ }, "imageId":{ "shape":"ImageId", - "documentation":"The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.
The default Amazon Linux AMI is currently used if the parameter isn't explicitly assigned a value in the request.
In the future the parameter for Amazon Linux will no longer be available when you specify an AMI for your instance. Amazon Linux 2 will then become the default AMI, which is used to launch your instance if no parameter is explicitly defined.
AMI aliases
Amazon Linux (default): amazonlinux-1-x86_64
Amazon Linux 2: amazonlinux-2-x86_64
Ubuntu 18.04: ubuntu-18.04-x86_64
SSM paths
Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64
Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64
Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64
The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.
The default Amazon Linux AMI is currently used if the parameter isn't explicitly assigned a value in the request. Because Amazon Linux AMI has ended standard support as of December 31, 2020, we recommend you choose Amazon Linux 2, which includes long term support through 2023.
From December 31, 2023, the parameter for Amazon Linux will no longer be available when you specify an AMI for your instance. Amazon Linux 2 will then become the default AMI, which is used to launch your instance if no parameter is explicitly defined.
AMI aliases
Amazon Linux (default): amazonlinux-1-x86_64
Amazon Linux 2: amazonlinux-2-x86_64
Ubuntu 18.04: ubuntu-18.04-x86_64
SSM paths
Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64
Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64
Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64
Alphanumeric string that can be used to distinguish between calls to the IssueCertificate action. Idempotency tokens for IssueCertificate time out after one minute. Therefore, if you call IssueCertificate multiple times with the same idempotency token within one minute, Amazon Web Services Private CA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, Amazon Web Services Private CA recognizes that you are requesting multiple certificates.
" + "documentation":"Alphanumeric string that can be used to distinguish between calls to the IssueCertificate action. Idempotency tokens for IssueCertificate time out after five minutes. Therefore, if you call IssueCertificate multiple times with the same idempotency token within five minutes, Amazon Web Services Private CA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, Amazon Web Services Private CA recognizes that you are requesting multiple certificates.
" } } }, @@ -2080,5 +2080,5 @@ ] } }, - "documentation":"This is the Amazon Web Services Private Certificate Authority API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types involved in creating and managing a private certificate authority (CA) for your organization.
The documentation for each action shows the API request parameters and the JSON response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you prefer. For more information, see Amazon Web Services SDKs.
Each Amazon Web Services Private CA API operation has a quota that determines the number of times the operation can be called per second. Amazon Web Services Private CA throttles API requests at different rates depending on the operation. Throttling means that Amazon Web Services Private CA rejects an otherwise valid request because the request exceeds the operation's quota for the number of requests per second. When a request is throttled, Amazon Web Services Private CA returns a ThrottlingException error. Amazon Web Services Private CA does not guarantee a minimum request rate for APIs.
To see an up-to-date list of your Amazon Web Services Private CA quotas, or to request a quota increase, log into your Amazon Web Services account and visit the Service Quotas console.
" + "documentation":"This is the Amazon Web Services Private Certificate Authority API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types involved in creating and managing a private certificate authority (CA) for your organization.
The documentation for each action shows the API request parameters and the JSON response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you prefer. For more information, see Amazon Web Services SDKs.
Each Amazon Web Services Private CA API operation has a quota that determines the number of times the operation can be called per second. Amazon Web Services Private CA throttles API requests at different rates depending on the operation. Throttling means that Amazon Web Services Private CA rejects an otherwise valid request because the request exceeds the operation's quota for the number of requests per second. When a request is throttled, Amazon Web Services Private CA returns a ThrottlingException error. Amazon Web Services Private CA does not guarantee a minimum request rate for APIs.
To see an up-to-date list of your Amazon Web Services Private CA quotas, or to request a quota increase, log into your Amazon Web Services account and visit the Service Quotas console.
" } From 12b98e982b425e7f178fc6a8c8472178c95132d5 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 4 Aug 2023 18:16:49 +0000 Subject: [PATCH 110/270] Amazon Connect Service Update: Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile. --- .../feature-AmazonConnectService-2293d92.json | 6 + .../codegen-resources/endpoint-rule-set.json | 362 ++++++++---------- .../codegen-resources/service-2.json | 57 +++ 3 files changed, 226 insertions(+), 199 deletions(-) create mode 100644 .changes/next-release/feature-AmazonConnectService-2293d92.json diff --git a/.changes/next-release/feature-AmazonConnectService-2293d92.json b/.changes/next-release/feature-AmazonConnectService-2293d92.json new file mode 100644 index 000000000000..227a5de7150a --- /dev/null +++ b/.changes/next-release/feature-AmazonConnectService-2293d92.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile." 
+} diff --git a/services/connect/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/connect/src/main/resources/codegen-resources/endpoint-rule-set.json index 57834595dab7..1f6adf2f2f3f 100644 --- a/services/connect/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/connect/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", 
"argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://connect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://connect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - 
{ - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://connect.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://connect-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://connect.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://connect-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://connect.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": 
"https://connect.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://connect.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://connect.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index 2dfb0061ea07..b6c91862319f 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -3108,6 +3108,22 @@ ], "documentation":"Updates the name and description of a quick connect. The request accepts the following data in JSON format. At least Name or Description must be provided.
Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.
" + }, "UpdateRoutingProfileConcurrency":{ "name":"UpdateRoutingProfileConcurrency", "http":{ @@ -3444,6 +3460,13 @@ "type":"integer", "min":0 }, + "AgentAvailabilityTimer":{ + "type":"string", + "enum":[ + "TIME_SINCE_LAST_ACTIVITY", + "TIME_SINCE_LAST_INBOUND" + ] + }, "AgentContactReference":{ "type":"structure", "members":{ @@ -5159,6 +5182,10 @@ "Tags":{ "shape":"TagMap", "documentation":"The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.
" + }, + "AgentAvailabilityTimer":{ + "shape":"AgentAvailabilityTimer", + "documentation":"Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.
" } } }, @@ -12855,6 +12882,10 @@ "NumberOfAssociatedUsers":{ "shape":"Long", "documentation":"The number of associated users in routing profile.
" + }, + "AgentAvailabilityTimer":{ + "shape":"AgentAvailabilityTimer", + "documentation":"Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.
" } }, "documentation":"Contains information about a routing profile.
" @@ -15936,6 +15967,32 @@ } } }, + "UpdateRoutingProfileAgentAvailabilityTimerRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "RoutingProfileId", + "AgentAvailabilityTimer" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
", + "location":"uri", + "locationName":"InstanceId" + }, + "RoutingProfileId":{ + "shape":"RoutingProfileId", + "documentation":"The identifier of the routing profile.
", + "location":"uri", + "locationName":"RoutingProfileId" + }, + "AgentAvailabilityTimer":{ + "shape":"AgentAvailabilityTimer", + "documentation":"Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.
" + } + } + }, "UpdateRoutingProfileConcurrencyRequest":{ "type":"structure", "required":[ From 631f7dedb84a1f0f842f9762bb9fc6a193190a8f Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 4 Aug 2023 18:16:50 +0000 Subject: [PATCH 111/270] Amazon SageMaker Service Update: Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object --- .../feature-AmazonSageMakerService-8ec4854.json | 6 ++++++ .../src/main/resources/codegen-resources/service-2.json | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 .changes/next-release/feature-AmazonSageMakerService-8ec4854.json diff --git a/.changes/next-release/feature-AmazonSageMakerService-8ec4854.json b/.changes/next-release/feature-AmazonSageMakerService-8ec4854.json new file mode 100644 index 000000000000..56f6008ed8d8 --- /dev/null +++ b/.changes/next-release/feature-AmazonSageMakerService-8ec4854.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object" +} diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index 675695d3a7a8..d1c11aff6a66 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -32598,7 +32598,8 @@ "Tags":{ "shape":"TagList", "documentation":"A list of tags associated with the transform job.
" - } + }, + "DataCaptureConfig":{"shape":"BatchDataCaptureConfig"} }, "documentation":"A batch transform job. For information about SageMaker batch transform, see Use Batch Transform.
" }, From 0d550b09a5cc003d312474f69afc3fcd196f87e7 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 4 Aug 2023 18:16:58 +0000 Subject: [PATCH 112/270] AWS DataSync Update: Display cloud storage used capacity at a cluster level. --- .../feature-AWSDataSync-8ffdea3.json | 6 + .../codegen-resources/endpoint-rule-set.json | 344 ++++++++---------- .../codegen-resources/service-2.json | 40 +- 3 files changed, 184 insertions(+), 206 deletions(-) create mode 100644 .changes/next-release/feature-AWSDataSync-8ffdea3.json diff --git a/.changes/next-release/feature-AWSDataSync-8ffdea3.json b/.changes/next-release/feature-AWSDataSync-8ffdea3.json new file mode 100644 index 000000000000..e370e86590df --- /dev/null +++ b/.changes/next-release/feature-AWSDataSync-8ffdea3.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "Display cloud storage used capacity at a cluster level." +} diff --git a/services/datasync/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/datasync/src/main/resources/codegen-resources/endpoint-rule-set.json index d1fda177e764..0d32931aa5ad 100644 --- a/services/datasync/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/datasync/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], 
+ "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://datasync-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://datasync-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", 
+ "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://datasync-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://datasync-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - 
"type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://datasync.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://datasync.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://datasync.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://datasync.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/datasync/src/main/resources/codegen-resources/service-2.json b/services/datasync/src/main/resources/codegen-resources/service-2.json index db15acce7771..1260e47922d1 100644 --- a/services/datasync/src/main/resources/codegen-resources/service-2.json +++ b/services/datasync/src/main/resources/codegen-resources/service-2.json @@ -167,7 +167,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Creates an endpoint for an Network File System (NFS) file server that DataSync can use for a data transfer.
" + "documentation":"Creates an endpoint for a Network File System (NFS) file server that DataSync can use for a data transfer.
For more information, see Configuring transfers to or from an NFS file server.
If you're copying data to or from an Snowcone device, you can also use CreateLocationNfs to create your transfer location. For more information, see Configuring transfers with Snowcone.
Returns metadata, such as the path information, about an NFS location.
" + "documentation":"Provides details about how an DataSync transfer location for a Network File System (NFS) file server is configured.
" }, "DescribeLocationObjectStorage":{ "name":"DescribeLocationObjectStorage", @@ -794,7 +794,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"Updates some of the parameters of a previously created location for Network File System (NFS) access. For information about creating an NFS location, see Creating a location for NFS.
" + "documentation":"Modifies some configurations of the Network File System (NFS) transfer location that you're using with DataSync.
For more information, see Configuring transfers to or from an NFS file server.
" }, "UpdateLocationObjectStorage":{ "name":"UpdateLocationObjectStorage", @@ -1054,6 +1054,10 @@ "LogicalUsed":{ "shape":"NonNegativeLong", "documentation":"The amount of space that's being used in a storage system resource without accounting for compression or deduplication.
" + }, + "ClusterCloudStorageUsed":{ + "shape":"NonNegativeLong", + "documentation":"The amount of space in the cluster that's in cloud storage (for example, if you're using data tiering).
" } }, "documentation":"The storage capacity of an on-premises storage system resource (for example, a volume).
" @@ -1441,19 +1445,19 @@ "members":{ "Subdirectory":{ "shape":"NfsSubdirectory", - "documentation":"Specifies the subdirectory in the NFS file server that DataSync transfers to or from. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
" + "documentation":"Specifies the export path in your NFS file server that you want DataSync to mount.
This path (or a subdirectory of the path) is where DataSync transfers data to or from. For information on configuring an export for DataSync, see Accessing NFS file servers.
" }, "ServerHostname":{ "shape":"ServerHostname", - "documentation":"Specifies the IP address or domain name of your NFS file server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
You must specify be an IP version 4 address or Domain Name System (DNS)-compliant name.
Specifies the Domain Name System (DNS) name or IP version 4 address of the NFS file server that your DataSync agent connects to.
" }, "OnPremConfig":{ "shape":"OnPremConfig", - "documentation":"Specifies the Amazon Resource Names (ARNs) of agents that DataSync uses to connect to your NFS file server.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server.
You can specify more than one agent. For more information, see Using multiple agents for transfers.
" }, "MountOptions":{ "shape":"NfsMountOptions", - "documentation":"Specifies the mount options that DataSync can use to mount your NFS share.
" + "documentation":"Specifies the options that DataSync can use to mount your NFS file server.
" }, "Tags":{ "shape":"InputTagList", @@ -2134,7 +2138,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"The Amazon Resource Name (ARN) of the NFS location to describe.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the NFS location that you want information about.
" } }, "documentation":"DescribeLocationNfsRequest
" @@ -2144,20 +2148,20 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"The Amazon Resource Name (ARN) of the NFS location that was described.
" + "documentation":"The ARN of the NFS location.
" }, "LocationUri":{ "shape":"LocationUri", - "documentation":"The URL of the source NFS location that was described.
" + "documentation":"The URL of the NFS location.
" }, "OnPremConfig":{"shape":"OnPremConfig"}, "MountOptions":{ "shape":"NfsMountOptions", - "documentation":"The mount options that DataSync uses to mount your NFS share.
" + "documentation":"The mount options that DataSync uses to mount your NFS file server.
" }, "CreationTime":{ "shape":"Time", - "documentation":"The time that the NFS location was created.
" + "documentation":"The time when the NFS location was created.
" } }, "documentation":"DescribeLocationNfsResponse
" @@ -3505,6 +3509,10 @@ "LunCount":{ "shape":"NonNegativeLong", "documentation":"The number of LUNs (logical unit numbers) in the cluster.
" + }, + "ClusterCloudStorageUsed":{ + "shape":"NonNegativeLong", + "documentation":"The amount of space in the cluster that's in cloud storage (for example, if you're using data tiering).
" } }, "documentation":"The information that DataSync Discovery collects about an on-premises storage system cluster.
" @@ -3742,10 +3750,10 @@ "members":{ "AgentArns":{ "shape":"AgentArnList", - "documentation":"ARNs of the agents to use for an NFS location.
" + "documentation":"The Amazon Resource Names (ARNs) of the agents connecting to a transfer location.
" } }, - "documentation":"A list of Amazon Resource Names (ARNs) of agents to use for a Network File System (NFS) location.
" + "documentation":"The DataSync agents that are connecting to a Network File System (NFS) location.
" }, "Operator":{ "type":"string", @@ -4731,11 +4739,11 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"Specifies the Amazon Resource Name (ARN) of the NFS location that you want to update.
" + "documentation":"Specifies the Amazon Resource Name (ARN) of the NFS transfer location that you want to update.
" }, "Subdirectory":{ "shape":"NfsSubdirectory", - "documentation":"Specifies the subdirectory in your NFS file system that DataSync uses to read from or write to during a transfer. The NFS path should be exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.
To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
" + "documentation":"Specifies the export path in your NFS file server that you want DataSync to mount.
This path (or a subdirectory of the path) is where DataSync transfers data to or from. For information on configuring an export for DataSync, see Accessing NFS file servers.
" }, "OnPremConfig":{"shape":"OnPremConfig"}, "MountOptions":{"shape":"NfsMountOptions"} From e6d46af27b58a8eda2d71846a6dceae93f507c64 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 4 Aug 2023 18:17:00 +0000 Subject: [PATCH 113/270] Amazon EC2 Container Service Update: This is a documentation update to address various tickets. --- ...ure-AmazonEC2ContainerService-3e7a97e.json | 6 + .../codegen-resources/endpoint-rule-set.json | 344 ++++++++---------- .../codegen-resources/service-2.json | 42 +-- 3 files changed, 181 insertions(+), 211 deletions(-) create mode 100644 .changes/next-release/feature-AmazonEC2ContainerService-3e7a97e.json diff --git a/.changes/next-release/feature-AmazonEC2ContainerService-3e7a97e.json b/.changes/next-release/feature-AmazonEC2ContainerService-3e7a97e.json new file mode 100644 index 000000000000..94233204829a --- /dev/null +++ b/.changes/next-release/feature-AmazonEC2ContainerService-3e7a97e.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This is a documentation update to address various tickets." 
+} diff --git a/services/ecs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/ecs/src/main/resources/codegen-resources/endpoint-rule-set.json index 1614858d7cb1..57a28815f47c 100644 --- a/services/ecs/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/ecs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + 
true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ecs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://ecs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://ecs-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": 
"PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://ecs-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ecs.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://ecs.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ecs.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://ecs.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": 
"endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index 3c6fb39aea7d..6e93278b9dd5 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -351,7 +351,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ClusterNotFoundException"} ], - "documentation":"Describes a specified task or tasks.
Currently, stopped tasks appear in the returned results for at least one hour.
" + "documentation":"Describes a specified task or tasks.
Currently, stopped tasks appear in the returned results for at least one hour.
If you have tasks with tags, and then delete the cluster, the tagged tasks are returned in the response. If you create a new cluster with the same name as the deleted cluster, the tagged tasks are not included in the response.
" }, "DiscoverPollEndpoint":{ "name":"DiscoverPollEndpoint", @@ -557,7 +557,7 @@ {"shape":"ClusterNotFoundException"}, {"shape":"ServiceNotFoundException"} ], - "documentation":"Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task.
Recently stopped tasks might appear in the returned results. Currently, stopped tasks appear in the returned results for at least one hour.
" + "documentation":"Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task.
Recently stopped tasks might appear in the returned results.
" }, "PutAccountSetting":{ "name":"PutAccountSetting", @@ -890,7 +890,7 @@ {"shape":"AccessDeniedException"}, {"shape":"NamespaceNotFoundException"} ], - "documentation":"Modifies the parameters of a service.
For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet.
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.
If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.
If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.
You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.
If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.
The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.
When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.
Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.
Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:
Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.
Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.
You must have a service-linked role when you update any of the following service properties. If you specified a custom role when you created the service, Amazon ECS automatically replaces the roleARN associated with the service with the ARN of your service-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide.
loadBalancers,
serviceRegistries
Modifies the parameters of a service.
For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet.
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.
If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.
If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.
You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.
If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.
The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.
When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.
Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.
Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:
Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.
Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.
You must have a service-linked role when you update any of the following service properties:
loadBalancers,
serviceRegistries
For more information about the role see the CreateService request parameter role .
The Amazon Resource Name (ARN) that identifies the Auto Scaling group.
" + "documentation":"The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name.
" }, "managedScaling":{ "shape":"ManagedScaling", - "documentation":"The managed scaling settings for the Auto Scaling group capacity provider.
" + "documentation":"he managed scaling settings for the Auto Scaling group capacity provider.
" }, "managedTerminationProtection":{ "shape":"ManagedTerminationProtection", @@ -1602,11 +1602,11 @@ }, "startTimeout":{ "shape":"BoxedInteger", - "documentation":"Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED state.
When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value.
For tasks using the Fargate launch type, the task or service requires the following platforms:
Linux platform version 1.3.0 or later.
Windows platform version 1.0.0 or later.
For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED state.
When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value.
For tasks using the Fargate launch type, the task or service requires the following platforms:
Linux platform version 1.3.0 or later.
Windows platform version 1.0.0 or later.
For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The valid values are 2-120 seconds.
" }, "stopTimeout":{ "shape":"BoxedInteger", - "documentation":"Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.
For tasks using the Fargate launch type, the task or service requires the following platforms:
Linux platform version 1.3.0 or later.
Windows platform version 1.0.0 or later.
The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.
For tasks that use the EC2 launch type, if the stopTimeout parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.
For tasks using the Fargate launch type, the task or service requires the following platforms:
Linux platform version 1.3.0 or later.
Windows platform version 1.0.0 or later.
The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.
For tasks that use the EC2 launch type, if the stopTimeout parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The valid values are 2-120 seconds.
" }, "hostname":{ "shape":"String", @@ -1686,7 +1686,7 @@ }, "credentialSpecs":{ "shape":"StringList", - "documentation":"A list of ARNs in SSM or Amazon S3 to a credential spec (credspeccode>) file that configures a container for Active Directory authentication. This parameter is only used with domainless authentication.
The format for each ARN is credentialspecdomainless:MyARN. Replace MyARN with the ARN in SSM or Amazon S3.
The credspec must provide a ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even it the tasks need to join different domains. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers.
A list of ARNs in SSM or Amazon S3 to a credential spec (CredSpec) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the dockerSecurityOptions. The maximum number of ARNs is 1.
There are two formats for each ARN.
You use credentialspecdomainless:MyARN to provide a CredSpec with an additional section for a secret in Secrets Manager. You provide the login credentials to the domain in the secret.
Each task that runs on any container instance can join different domains.
You can use this format without joining the container instance to a domain.
You use credentialspec:MyARN to provide a CredSpec for a single domain.
You must join the container instance to the domain before you start any tasks that use this task definition.
In both formats, replace MyARN with the ARN in SSM or Amazon S3.
If you provide a credentialspecdomainless:MyARN, the credspec must provide a ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even it the tasks need to join different domains. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers.
Container definitions are used in task definitions to describe the different containers that are launched as part of a task.
" @@ -1870,7 +1870,7 @@ "documentation":"The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU.
" } }, - "documentation":"The overrides that are sent to a container. An empty container override can be passed in. An example of an empty container override is {\"containerOverrides\": [ ] }. If a non-empty container override is specified, the name parameter must be included.
The overrides that are sent to a container. An empty container override can be passed in. An example of an empty container override is {\"containerOverrides\": [ ] }. If a non-empty container override is specified, the name parameter must be included.
You can use Secrets Manager or Amazon Web Services Systems Manager Parameter Store to store the sensitive data. For more information, see Retrieve secrets through environment variables in the Amazon ECS Developer Guide.
" }, "ContainerOverrides":{ "type":"list", @@ -2119,7 +2119,7 @@ }, "taskDefinition":{ "shape":"String", - "documentation":"The task definition for the tasks in the task set to use.
" + "documentation":"The task definition for the tasks in the task set to use. If a revision isn't specified, the latest ACTIVE revision is used.
Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is on, when a service deployment fails, the service is rolled back to the last deployment that completed successfully.
" } }, - "documentation":"The deployment circuit breaker can only be used for services using the rolling update (ECS) deployment type.
The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. If it is turned on, a service deployment will transition to a failed state and stop launching new tasks. You can also configure Amazon ECS to roll back your service to the last completed deployment after a failure. For more information, see Rolling update in the Amazon Elastic Container Service Developer Guide.
" + "documentation":"The deployment circuit breaker can only be used for services using the rolling update (ECS) deployment type.
The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. If it is turned on, a service deployment will transition to a failed state and stop launching new tasks. You can also configure Amazon ECS to roll back your service to the last completed deployment after a failure. For more information, see Rolling update in the Amazon Elastic Container Service Developer Guide.
For more information about API failure reasons, see API failure reasons in the Amazon Elastic Container Service Developer Guide.
" }, "DeploymentConfiguration":{ "type":"structure", @@ -2953,7 +2953,7 @@ "documentation":"The file type to use. The only supported value is s3.
A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information about the environment variable file syntax, see Declare default environment variables in file.
If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying environment variables in the Amazon Elastic Container Service Developer Guide.
This parameter is only supported for tasks hosted on Fargate using the following platform versions:
Linux platform version 1.4.0 or later.
Windows platform version 1.0.0 or later.
A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information about the environment variable file syntax, see Declare default environment variables in file.
If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying environment variables in the Amazon Elastic Container Service Developer Guide.
You must use the following platforms for the Fargate launch type:
Linux platform version 1.4.0 or later.
Windows platform version 1.0.0 or later.
The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.
A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a Classic Load Balancer, omit the target group ARN.
For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.
For services using the CODE_DEPLOY deployment controller, you're required to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.
If your service's task definition uses the awsvpc network mode, you must choose ip as the target type, not instance. Do this when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.
The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.
A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer.
For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.
For services using the CODE_DEPLOY deployment controller, you're required to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.
If your service's task definition uses the awsvpc network mode, you must choose ip as the target type, not instance. Do this when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.
The name of the load balancer to associate with the Amazon ECS service or task set.
A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.
" + "documentation":"The name of the load balancer to associate with the Amazon ECS service or task set.
If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.
" }, "containerName":{ "shape":"String", @@ -3855,7 +3855,7 @@ "documentation":"The secrets to pass to the log configuration. For more information, see Specifying sensitive data in the Amazon Elastic Container Service Developer Guide.
" } }, - "documentation":"The log configuration for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run .
By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.
Understand the following when specifying a log configuration for your containers.
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the valid values below). Additional log drivers may be available in future releases of the Amazon ECS container agent.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.
For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.
The log configuration for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run .
By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.
Understand the following when specifying a log configuration for your containers.
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.
For tasks on Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.
For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries,syslog, splunk, and awsfirelens.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.
For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.
The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 1 is used.
The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 10000 is used.
The port number on the container instance to reserve for your container.
If you specify a containerPortRange, leave this field empty and the value of the hostPort is set as follows:
For containers in a task with the awsvpc network mode, the hostPort is set to the same value as the containerPort. This is a static mapping strategy.
For containers in a task with the bridge network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy.
If you use containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort.
If you use containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.
The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.
The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. Automatically assigned ports aren't included in the 100 reserved ports quota.
The port number on the container instance to reserve for your container.
If you specify a containerPortRange, leave this field empty and the value of the hostPort is set as follows:
For containers in a task with the awsvpc network mode, the hostPort is set to the same value as the containerPort. This is a static mapping strategy.
For containers in a task with the bridge network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy.
If you use containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort.
If you use containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.
The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 (Linux) or 49152 through 65535 (Windows) is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.
The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. Automatically assigned ports aren't included in the 100 reserved ports quota.
The stop code indicating why a task was stopped. The stoppedReason might contain additional details.
The following are valid values:
TaskFailedToStart
EssentialContainerExited
UserInitiated
TerminationNotice
ServiceSchedulerInitiated
SpotInterruption
The stop code indicating why a task was stopped. The stoppedReason might contain additional details.
For more information about stop code, see Stopped tasks error codes in the Amazon ECS User Guide.
The following are valid values:
TaskFailedToStart
EssentialContainerExited
UserInitiated
TerminationNotice
ServiceSchedulerInitiated
SpotInterruption
The Unix timestamp for the time when the task stops. More specifically, it's for the time when the task transitions from the RUNNING state to STOPPED.
The Unix timestamp for the time when the task stops. More specifically, it's for the time when the task transitions from the RUNNING state to STOPPING.
The task launch types the task definition was validated against. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
" + "documentation":"The task launch types the task definition was validated against. The valid values are EC2, FARGATE, and EXTERNAL. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.
The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.
This is required wwhen you use an Amazon EFS volume.
" }, "host":{ "shape":"HostVolumeProperties", From 0b7e89951206bdd115cc5750a83494cf5282c8f3 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 4 Aug 2023 18:18:49 +0000 Subject: [PATCH 114/270] Updated endpoints.json and partitions.json. --- .changes/next-release/feature-AWSSDKforJavav2-0443982.json | 6 ++++++ .../amazon/awssdk/regions/internal/region/endpoints.json | 1 + 2 files changed, 7 insertions(+) create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json new file mode 100644 index 000000000000..e5b5ee3ca5e3 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." +} diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 1e96eeb5812d..2846c824796b 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -13280,6 +13280,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, From 12e114dd3f8c658fca1fa005a7edf5e8cb5fc938 Mon Sep 17 00:00:00 2001 From: AWS <> Date: Fri, 4 Aug 2023 18:19:59 +0000 Subject: [PATCH 115/270] Release 2.20.120. Updated CHANGELOG.md, README.md and all pom.xml. 
--- .changes/2.20.120.json | 42 +++++++++++++++++++ ...erPrivateCertificateAuthority-cf7a8db.json | 6 --- .../feature-AWSDataSync-8ffdea3.json | 6 --- .../feature-AWSSDKforJavav2-0443982.json | 6 --- .../feature-AmazonConnectService-2293d92.json | 6 --- ...ure-AmazonEC2ContainerService-3e7a97e.json | 6 --- ...eature-AmazonSageMakerService-8ec4854.json | 6 --- CHANGELOG.md | 25 +++++++++++ README.md | 8 ++-- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 2 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 2 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- core/sdk-core/pom.xml | 2 +- http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 2 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- services-custom/iam-policy-builder/pom.xml | 2 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml 
| 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- 
services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- services/efs/pom.xml | 2 
+- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- services/glue/pom.xml | 2 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- 
services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- services/medialive/pom.xml | 2 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- 
services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- services/pom.xml | 2 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- services/resiliencehub/pom.xml | 2 +- 
services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- 
services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- 425 files changed, 487 insertions(+), 456 deletions(-) create mode 100644 .changes/2.20.120.json delete mode 100644 .changes/next-release/feature-AWSCertificateManagerPrivateCertificateAuthority-cf7a8db.json delete mode 100644 .changes/next-release/feature-AWSDataSync-8ffdea3.json delete mode 100644 .changes/next-release/feature-AWSSDKforJavav2-0443982.json delete mode 100644 
.changes/next-release/feature-AmazonConnectService-2293d92.json delete mode 100644 .changes/next-release/feature-AmazonEC2ContainerService-3e7a97e.json delete mode 100644 .changes/next-release/feature-AmazonSageMakerService-8ec4854.json diff --git a/.changes/2.20.120.json b/.changes/2.20.120.json new file mode 100644 index 000000000000..f8f88ca6099d --- /dev/null +++ b/.changes/2.20.120.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.120", + "date": "2023-08-04", + "entries": [ + { + "type": "feature", + "category": "AWS Certificate Manager Private Certificate Authority", + "contributor": "", + "description": "Documentation correction for AWS Private CA" + }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "Display cloud storage used capacity at a cluster level." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This is a documentation update to address various tickets." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AWSCertificateManagerPrivateCertificateAuthority-cf7a8db.json b/.changes/next-release/feature-AWSCertificateManagerPrivateCertificateAuthority-cf7a8db.json deleted file mode 100644 index a9c7ebb540c1..000000000000 --- a/.changes/next-release/feature-AWSCertificateManagerPrivateCertificateAuthority-cf7a8db.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS Certificate Manager Private Certificate Authority", - "contributor": "", - "description": "Documentation correction for AWS Private CA" -} diff --git a/.changes/next-release/feature-AWSDataSync-8ffdea3.json b/.changes/next-release/feature-AWSDataSync-8ffdea3.json deleted file mode 100644 index e370e86590df..000000000000 --- a/.changes/next-release/feature-AWSDataSync-8ffdea3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS DataSync", - "contributor": "", - "description": "Display cloud storage used capacity at a cluster level." -} diff --git a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json b/.changes/next-release/feature-AWSSDKforJavav2-0443982.json deleted file mode 100644 index e5b5ee3ca5e3..000000000000 --- a/.changes/next-release/feature-AWSSDKforJavav2-0443982.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "AWS SDK for Java v2", - "contributor": "", - "description": "Updated endpoint and partition metadata." -} diff --git a/.changes/next-release/feature-AmazonConnectService-2293d92.json b/.changes/next-release/feature-AmazonConnectService-2293d92.json deleted file mode 100644 index 227a5de7150a..000000000000 --- a/.changes/next-release/feature-AmazonConnectService-2293d92.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon Connect Service", - "contributor": "", - "description": "Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile." 
-} diff --git a/.changes/next-release/feature-AmazonEC2ContainerService-3e7a97e.json b/.changes/next-release/feature-AmazonEC2ContainerService-3e7a97e.json deleted file mode 100644 index 94233204829a..000000000000 --- a/.changes/next-release/feature-AmazonEC2ContainerService-3e7a97e.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon EC2 Container Service", - "contributor": "", - "description": "This is a documentation update to address various tickets." -} diff --git a/.changes/next-release/feature-AmazonSageMakerService-8ec4854.json b/.changes/next-release/feature-AmazonSageMakerService-8ec4854.json deleted file mode 100644 index 56f6008ed8d8..000000000000 --- a/.changes/next-release/feature-AmazonSageMakerService-8ec4854.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "feature", - "category": "Amazon SageMaker Service", - "contributor": "", - "description": "Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object" -} diff --git a/CHANGELOG.md b/CHANGELOG.md index 5321487d975d..d99c1c15b593 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +# __2.20.120__ __2023-08-04__ +## __AWS Certificate Manager Private Certificate Authority__ + - ### Features + - Documentation correction for AWS Private CA + +## __AWS DataSync__ + - ### Features + - Display cloud storage used capacity at a cluster level. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Connect Service__ + - ### Features + - Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile. + +## __Amazon EC2 Container Service__ + - ### Features + - This is a documentation update to address various tickets. 
+ +## __Amazon SageMaker Service__ + - ### Features + - Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object + # __2.20.119__ __2023-08-03__ ## __AWS Cloud9__ - ### Features diff --git a/README.md b/README.md index 1eae6172c61d..c8f5899aa577 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same ver+ * If content length of this {@link AsyncRequestBody} is present, each divided {@link AsyncRequestBody} is delivered to the + * subscriber right after it's initialized. + *
+ * If content length is null, it is sent after the entire content for that chunk is buffered.
+ * In this case, the configured {@code maxMemoryUsageInBytes} must be larger than or equal to {@code chunkSizeInBytes}.
+ *
+ * @see AsyncRequestBodySplitConfiguration
+ */
+ default SdkPublisher If content length is known, each {@link AsyncRequestBody} is sent to the subscriber right after it's initialized.
+ * Otherwise, it is sent after the entire content for that chunk is buffered. This is required to get content length.
+ */
+@SdkInternalApi
+public class SplittingPublisher implements SdkPublisher
+ * Note: The multipart operation for {@link S3AsyncClient#getObject(GetObjectRequest, AsyncResponseTransformer)} is
+ * temporarily disabled and will result in throwing an {@link UnsupportedOperationException} if called when configured for
+ * multipart operation.
+ */
+@SdkPublicApi
+public final class MultipartConfiguration implements ToCopyableBuilder
+ *
+ * Default value: 8 MiB
+ *
+ * @param thresholdInBytes the value of the threshold to set.
+ * @return an instance of this builder.
+ */
+ Builder thresholdInBytes(Long thresholdInBytes);
+
+ /**
+ * Indicates the value of the configured threshold.
+ * @return the value of the threshold.
+ */
+ Long thresholdInBytes();
+
+ /**
+ * Configures the part size, in bytes, to be used in each individual part requests.
+ * Only used for putObject and copyObject operations.
+ *
+ * When uploading a large payload, the size of the payload of each individual part request might actually be
+ * bigger than
+ * the configured value since there is a limit to the maximum number of parts possible per multipart request. If the
+ * configured part size would lead to a number of parts higher than the maximum allowed, a larger part size will be
+ * calculated instead to allow fewer parts to be uploaded, to avoid the limit imposed on the maximum number of parts.
+ *
+ * In the case where the {@code minimumPartSizeInBytes} is set to a value higher than the {@code thresholdInBytes}, when
+ * the client receives a request with a size smaller than a single part, multipart operation will NOT be performed
+ * even if the size of the request is larger than the threshold.
+ *
+ * Default value: 8 MiB
+ *
+ * @param minimumPartSizeInBytes the value of the part size to set
+ * @return an instance of this builder.
+ */
+ Builder minimumPartSizeInBytes(Long minimumPartSizeInBytes);
+
+ /**
+ * Indicates the value of the configured part size.
+ * @return the value of the part size
+ */
+ Long minimumPartSizeInBytes();
+
+ /**
+ * Configures the maximum amount of memory, in bytes, the SDK will use to buffer content of requests in memory.
+ * Increasing this value may lead to better performance at the cost of using more memory.
+ *
+ * Default value: If not specified, the SDK will use the equivalent of four parts' worth of memory, so 32 MiB by default.
+ *
+ * @param apiCallBufferSizeInBytes the value of the maximum memory usage.
+ * @return an instance of this builder.
+ */
+ Builder apiCallBufferSizeInBytes(Long apiCallBufferSizeInBytes);
+
+ /**
+ * Indicates the value of the maximum memory usage that the SDK will use.
+ * @return the value of the maximum memory usage.
+ */
+ Long apiCallBufferSizeInBytes();
+ }
+
+ private static class DefaultMultipartConfigBuilder implements Builder {
+ private Long thresholdInBytes;
+ private Long minimumPartSizeInBytes;
+ private Long apiCallBufferSizeInBytes;
+
+ public Builder thresholdInBytes(Long thresholdInBytes) {
+ this.thresholdInBytes = thresholdInBytes;
+ return this;
+ }
+
+ public Long thresholdInBytes() {
+ return this.thresholdInBytes;
+ }
+
+ public Builder minimumPartSizeInBytes(Long minimumPartSizeInBytes) {
+ this.minimumPartSizeInBytes = minimumPartSizeInBytes;
+ return this;
+ }
+
+ public Long minimumPartSizeInBytes() {
+ return this.minimumPartSizeInBytes;
+ }
+
+ @Override
+ public Builder apiCallBufferSizeInBytes(Long maximumMemoryUsageInBytes) {
+ this.apiCallBufferSizeInBytes = maximumMemoryUsageInBytes;
+ return this;
+ }
+
+ @Override
+ public Long apiCallBufferSizeInBytes() {
+ return apiCallBufferSizeInBytes;
+ }
+
+ @Override
+ public MultipartConfiguration build() {
+ return new MultipartConfiguration(this);
+ }
+ }
+}
diff --git a/services/s3/src/main/resources/codegen-resources/customization.config b/services/s3/src/main/resources/codegen-resources/customization.config
index 71f5a2aef642..f33272a2a636 100644
--- a/services/s3/src/main/resources/codegen-resources/customization.config
+++ b/services/s3/src/main/resources/codegen-resources/customization.config
@@ -236,6 +236,13 @@
"syncClientDecorator": "software.amazon.awssdk.services.s3.internal.client.S3SyncClientDecorator",
"asyncClientDecorator": "software.amazon.awssdk.services.s3.internal.client.S3AsyncClientDecorator",
"useGlobalEndpoint": true,
+ "multipartCustomization": {
+ "multipartConfigurationClass": "software.amazon.awssdk.services.s3.multipart.MultipartConfiguration",
+ "multipartConfigMethodDoc": "Configuration for multipart operation of this client.",
+ "multipartEnableMethodDoc": "Enables automatic conversion of put and copy method to their equivalent multipart operation.",
+ "contextParamEnabledKey": "S3AsyncClientDecorator.MULTIPART_ENABLED_KEY",
+ "contextParamConfigKey": "S3AsyncClientDecorator.MULTIPART_CONFIGURATION_KEY"
+ },
"interceptors": [
"software.amazon.awssdk.services.s3.internal.handlers.StreamingRequestInterceptor",
"software.amazon.awssdk.services.s3.internal.handlers.CreateBucketInterceptor",
diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java
index 0d0e681c3645..bd5c34f91048 100644
--- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java
+++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java
@@ -24,7 +24,6 @@
import static org.mockito.Mockito.when;
import java.util.List;
-import java.util.Random;
import java.util.concurrent.CompletableFuture;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -34,6 +33,7 @@
import org.mockito.stubbing.Answer;
import software.amazon.awssdk.core.exception.SdkClientException;
import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.internal.multipart.CopyObjectHelper;
import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
@@ -48,10 +48,7 @@
import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest;
import software.amazon.awssdk.services.s3.model.UploadPartCopyResponse;
-import software.amazon.awssdk.utils.BinaryUtils;
import software.amazon.awssdk.utils.CompletableFutureUtils;
-import software.amazon.awssdk.utils.Md5Utils;
-
class CopyObjectHelperTest {
private static final String SOURCE_BUCKET = "source";
@@ -64,10 +61,13 @@ class CopyObjectHelperTest {
private S3AsyncClient s3AsyncClient;
private CopyObjectHelper copyHelper;
+ private static final long PART_SIZE = 1024L;
+ private static final long UPLOAD_THRESHOLD = PART_SIZE * 2;
+
@BeforeEach
public void setUp() {
s3AsyncClient = Mockito.mock(S3AsyncClient.class);
- copyHelper = new CopyObjectHelper(s3AsyncClient, PART_SIZE_BYTES);
+ copyHelper = new CopyObjectHelper(s3AsyncClient, PART_SIZE, UPLOAD_THRESHOLD);
}
@Test
@@ -119,6 +119,25 @@ void singlePartCopy_happyCase_shouldSucceed() {
assertThat(future.join()).isEqualTo(expectedResponse);
}
+ @Test
+ void copy_doesNotExceedThreshold_shouldUseSingleObjectCopy() {
+
+ CopyObjectRequest copyObjectRequest = copyObjectRequest();
+
+ stubSuccessfulHeadObjectCall(2000L);
+
+ CopyObjectResponse expectedResponse = CopyObjectResponse.builder().build();
+ CompletableFuture If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. Introduction The Amazon Interactive Video Service (IVS) stage API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors. Terminology: The IVS stage API sometimes is referred to as the IVS RealTime API. A participant token is an authorization token used to publish/subscribe to a stage. A participant object represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID There is a 1:1 mapping between participant tokens and participants. Resources The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS): Stage — A stage is a virtual space where multiple participants can exchange audio and video in real time. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS stage API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource. Stages Endpoints CreateParticipantToken — Creates an additional token for a specified stage. 
This can be done after stage creation or when tokens expire. CreateStage — Creates a new stage (and optionally participant tokens). DeleteStage — Shuts down and deletes the specified stage (disconnecting all participants). DisconnectParticipant — Disconnects a specified participant and revokes the participant permanently from a specified stage. GetParticipant — Gets information about the specified participant token. GetStage — Gets information for the specified stage. GetStageSession — Gets information for the specified stage session. ListParticipantEvents — Lists events for a specified participant that occurred during a specified stage session. ListParticipants — Lists all participants in a specified stage session. ListStages — Gets summary information about all stages in your account, in the AWS region where the API request is processed. ListStageSessions — Gets all sessions for a specified stage. UpdateStage — Updates a stage’s configuration. Tags Endpoints ListTagsForResource — Gets information about AWS tags for the specified ARN. TagResource — Adds or updates tags for the AWS resource with the specified ARN. UntagResource — Removes tags from the resource with the specified ARN. Introduction The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors. Terminology: A stage is a virtual space where participants can exchange video in real time. A participant token is a token that authenticates a participant when they join a stage. A participant object represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID There is a 1:1 mapping between participant tokens and participants. 
Resources The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS Real-Time Streaming): Stage — A stage is a virtual space where participants can exchange video in real time. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource. Stages Endpoints CreateParticipantToken — Creates an additional token for a specified stage. This can be done after stage creation or when tokens expire. CreateStage — Creates a new stage (and optionally participant tokens). DeleteStage — Shuts down and deletes the specified stage (disconnecting all participants). DisconnectParticipant — Disconnects a specified participant and revokes the participant permanently from a specified stage. GetParticipant — Gets information about the specified participant token. GetStage — Gets information for the specified stage. GetStageSession — Gets information for the specified stage session. ListParticipantEvents — Lists events for a specified participant that occurred during a specified stage session. ListParticipants — Lists all participants in a specified stage session. ListStages — Gets summary information about all stages in your account, in the AWS region where the API request is processed. ListStageSessions — Gets all sessions for a specified stage. UpdateStage — Updates a stage’s configuration. Tags Endpoints ListTagsForResource — Gets information about AWS tags for the specified ARN. 
TagResource — Adds or updates tags for the AWS resource with the specified ARN. UntagResource — Removes tags from the resource with the specified ARN. Associates a If the If the Associates a If the If the If Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify The response from To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of Dataset creation fails if a terminal error occurs ( For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify The response from To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of Dataset creation fails if a terminal error occurs ( For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the This API operation initiates a Face Liveness session. It returns a This API operation initiates a Face Liveness session. It returns a You can use the You can use Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. 
If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the When using You can specify Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. 
Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label. In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation that doesn't return any data. This operation requires permissions to perform the Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the When using When getting labels, you can specify Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. 
Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label. In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: The list of labels can include multiple labels for the same object. 
For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation that doesn't return any data. This operation requires permissions to perform the Gets face detection results for a Amazon Rekognition Video analysis started by StartFaceDetection. Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier ( Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in Gets face detection results for a Amazon Rekognition Video analysis started by StartFaceDetection. Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier ( Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in Note that for the Retrieves the results of a specific Face Liveness session. It requires the Retrieves the results of a specific Face Liveness session. It requires the The number of audit images returned by Gets the text detection results of a Amazon Rekognition Video analysis started by StartTextDetection. Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by calling StartTextDetection which returns a job identifier ( Each element of the array includes the detected text, the precentage confidence in the acuracy of the detected text, the time the text was detected, bounding box information for where the text was located, and unique identifiers for words and their lines. 
Use MaxResults parameter to limit the number of text detections returned. If there are more results than specified in Gets the text detection results of a Amazon Rekognition Video analysis started by StartTextDetection. Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by calling StartTextDetection which returns a job identifier ( Each element of the array includes the detected text, the precentage confidence in the acuracy of the detected text, the time the text was detected, bounding box information for where the text was located, and unique identifiers for words and their lines. Use MaxResults parameter to limit the number of text detections returned. If there are more results than specified in The type of the dataset. Specify The type of the dataset. Specify An array of facial attributes you want to be returned. A If you provide both, An array of facial attributes you want to be returned. A If you provide both, Note that while the FaceOccluded and EyeDirection attributes are supported when using Maximum number of labels you want the service to return in the response. The service returns the specified number of highest confidence labels. Maximum number of labels you want the service to return in the response. The service returns the specified number of highest confidence labels. Only valid when GENERAL_LABELS is specified as a feature type in the Feature input parameter. Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value. If Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value. If A set of images from the Face Liveness video that can be used for audit purposes. It includes a bounding box of the face and the Base64-encoded bytes that return an image. 
If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to an S3Object specified in the output configuration. A set of images from the Face Liveness video that can be used for audit purposes. It includes a bounding box of the face and the Base64-encoded bytes that return an image. If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to an S3Object specified in the output configuration. If no Amazon S3 bucket is defined, raw bytes are sent instead. An array of user IDs to match when listing faces in a collection. An array of user IDs to filter results with when listing faces in a collection. An array of face IDs to match when listing faces in a collection. An array of face IDs to filter results with when listing faces in a collection. Downloads an MP4 file (clip) containing the archived, on-demand media from the specified video stream over the specified time range. Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation. As a prerequisite to using GetCLip API, you must obtain an endpoint using An Amazon Kinesis video stream has the following requirements for providing data through MP4: The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be Data retention must be greater than 0. The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags. The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format. 
You can monitor the amount of outgoing data by monitoring the Downloads an MP4 file (clip) containing the archived, on-demand media from the specified video stream over the specified time range. Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation. As a prerequisite to using GetCLip API, you must obtain an endpoint using An Amazon Kinesis video stream has the following requirements for providing data through MP4: The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be Data retention must be greater than 0. The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags. The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format. You can monitor the amount of outgoing data by monitoring the Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents. Both the An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH: The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be Data retention must be greater than 0. The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags. 
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format. The following procedure shows how to use MPEG-DASH with Kinesis Video Streams: Get an endpoint using GetDataEndpoint, specifying Retrieve the MPEG-DASH URL using Don't share or store this token where an unauthorized entity can access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you use with your AWS credentials. The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions: GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to playback. GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \" The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames. GetMP4MediaFragment: Retrieves MP4 media fragments. 
These fragments contain the \" After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video. Data retrieved with this action is billable. See Pricing for details. For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits. You can monitor the amount of data that the media player consumes by monitoring the For more information about HLS, see HTTP Live Streaming on the Apple Developer site. If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information: Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again. For more information, see the Errors section at the bottom of this topic, as well as Common Errors. Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents. Both the An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH: The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be Data retention must be greater than 0. The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. 
For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags. The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format. The following procedure shows how to use MPEG-DASH with Kinesis Video Streams: Get an endpoint using GetDataEndpoint, specifying Retrieve the MPEG-DASH URL using Don't share or store this token where an unauthorized entity can access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you use with your Amazon Web Services credentials. The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions: GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to playback. GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \" The initialization fragment does not correspond to a fragment in a Kinesis video stream. 
It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames. GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \" After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video. Data retrieved with this action is billable. See Pricing for details. For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits. You can monitor the amount of data that the media player consumes by monitoring the For more information about HLS, see HTTP Live Streaming on the Apple Developer site. If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information: Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again. For more information, see the Errors section at the bottom of this topic, as well as Common Errors. Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents. Both the An Amazon Kinesis video stream has the following requirements for providing data through HLS: The media must contain h.264 or h.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be Data retention must be greater than 0. 
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags. The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7). Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification. The following procedure shows how to use HLS with Kinesis Video Streams: Get an endpoint using GetDataEndpoint, specifying Retrieve the HLS URL using Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials. The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples. The media player receives the authenticated URL and requests stream metadata and media data normally. 
When the media player requests data, it calls the following actions: GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \" The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames. GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \" After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video. Data retrieved with this action is billable. See Pricing for details. GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream. If the Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing. A streaming session URL must not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits. You can monitor the amount of data that the media player consumes by monitoring the For more information about HLS, see HTTP Live Streaming on the Apple Developer site. 
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information: Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again. For more information, see the Errors section at the bottom of this topic, as well as Common Errors. Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents. Both the An Amazon Kinesis video stream has the following requirements for providing data through HLS: For streaming video, the media must contain H.264 or H.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be Data retention must be greater than 0. The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags. The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7). Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification. The following procedure shows how to use HLS with Kinesis Video Streams: Get an endpoint using GetDataEndpoint, specifying Retrieve the HLS URL using Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. 
Safeguard the token with the same measures that you would use with your Amazon Web Services credentials. The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions: GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \" The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames. GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \" For the HLS streaming session, in-track codec private data (CPD) changes are supported. After the first media fragment is made available in a streaming session, fragments can contain CPD changes for each track. 
Therefore, the fragments in a session can have a different resolution, bit rate, or other information in the CPD without interrupting playback. However, any change made in the track number or track codec format can return an error when those different media fragments are loaded. For example, streaming will fail if the fragments in the stream change from having only video to having both audio and video, or if an AAC audio track is changed to an ALAW audio track. For each streaming session, only 500 CPD changes are allowed. Data retrieved with this action is billable. For information, see Pricing. GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream. If the Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing. A streaming session URL must not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits. You can monitor the amount of data that the media player consumes by monitoring the For more information about HLS, see HTTP Live Streaming on the Apple Developer site. If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information: Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again. For more information, see the Errors section at the bottom of this topic, as well as Common Errors. Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream. You must first call the For limits, see Kinesis Video Streams Limits. 
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information: Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again. For more information, see the Errors section at the bottom of this topic, as well as Common Errors. Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream. You must first call the For limits, see Kinesis Video Streams Limits. If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information: Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again. For more information, see the Errors section at the bottom of this topic, as well as Common Errors. Returns a list of Fragment objects from the specified stream and timestamp range within the archived data. Listing fragments is eventually consistent. 
This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to You must first call the If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information: Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again. For more information, see the Errors section at the bottom of this topic, as well as Common Errors. Returns a list of Fragment objects from the specified stream and timestamp range within the archived data. Listing fragments is eventually consistent. This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to You must first call the If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information: Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again. For more information, see the Errors section at the bottom of this topic, as well as Common Errors. The timestamp from the AWS server corresponding to the fragment. The timestamp from the Amazon Web Services server corresponding to the fragment. The end timestamp for the range of images to be generated. The end timestamp for the range of images to be generated. 
If the time range between The time interval in milliseconds (ms) at which the images need to be generated from the stream. The minimum value that can be provided is 3000 ms. If the timestamp range is less than the sampling interval, the Image from the The minimum value of 3000 ms is a soft limit. If needed, a lower sampling frequency can be requested. The time interval in milliseconds (ms) at which the images need to be generated from the stream, with a default of 3000 ms. The minimum value that can be provided is 200 ms. If the timestamp range is less than the sampling interval, the Image from the The minimum value of 200 ms is a hard limit. The maximum number of images to be returned by the API. The default limit is 100 images per API response. The additional results will be paginated. The maximum number of images to be returned by the API. The default limit is 25 images per API response. Providing a The payload that Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk. AWS_KINESISVIDEO_SERVER_SIDE_TIMESTAMP - Server-side timestamp of the fragment. AWS_KINESISVIDEO_PRODUCER_SIDE_TIMESTAMP - Producer-side timestamp of the fragment. The following tags will be included if an exception occurs: AWS_KINESISVIDEO_FRAGMENT_NUMBER - The number of the fragment that threw the exception AWS_KINESISVIDEO_EXCEPTION_ERROR_CODE - The integer code of the exception AWS_KINESISVIDEO_EXCEPTION_MESSAGE - A text description of the exception The payload that Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk. AWS_KINESISVIDEO_SERVER_SIDE_TIMESTAMP - Server-side timestamp of the fragment. 
AWS_KINESISVIDEO_PRODUCER_SIDE_TIMESTAMP - Producer-side timestamp of the fragment. The following tags will be included if an exception occurs: AWS_KINESISVIDEO_FRAGMENT_NUMBER - The number of the fragment that threw the exception AWS_KINESISVIDEO_EXCEPTION_ERROR_CODE - The integer code of the AWS_KINESISVIDEO_EXCEPTION_MESSAGE - A text description of the exception Requests the import of a resource as an Service Catalog provisioned product that is associated to an Service Catalog product and provisioning artifact. Once imported, all supported governance actions are supported on the provisioned product. Resource import only supports CloudFormation stack ARNs. CloudFormation StackSets, and non-root nested stacks are not supported. The CloudFormation stack must have one of the following statuses to be imported: Import of the resource requires that the CloudFormation stack template matches the associated Service Catalog product provisioning artifact. When you import an existing CloudFormation stack into a portfolio, constraints that are associated with the product aren't applied during the import process. The constraints are applied after you call The user or role that performs this operation must have the Requests the import of a resource as an Service Catalog provisioned product that is associated to an Service Catalog product and provisioning artifact. Once imported, all supported governance actions are supported on the provisioned product. Resource import only supports CloudFormation stack ARNs. CloudFormation StackSets, and non-root nested stacks, are not supported. The CloudFormation stack must have one of the following statuses to be imported: Import of the resource requires that the CloudFormation stack template matches the associated Service Catalog product provisioning artifact. When you import an existing CloudFormation stack into a portfolio, Service Catalog does not apply the product's associated constraints during the import process. 
Service Catalog applies the constraints after you call The user or role that performs this operation must have the You can only import one provisioned product at a time. The product's CloudFormation stack must have the Updates the specified portfolio share. You can use this API to enable or disable The portfolio share cannot be updated if the You must provide the If the portfolio is shared to both an external account and an organization node, and both shares need to be updated, you must invoke This API cannot be used for removing the portfolio share. You must use When you associate a principal with portfolio, a potential privilege escalation path may occur when that portfolio is then shared with other accounts. For a user in a recipient account who is not an Service Catalog Admin, but still has the ability to create Principals (Users/Groups/Roles), that user could create a role that matches a principal name association for the portfolio. Although this user may not know which principal names are associated through Service Catalog, they may be able to guess the user. If this potential escalation path is a concern, then Service Catalog recommends using Updates the specified portfolio share. You can use this API to enable or disable The portfolio share cannot be updated if the You must provide the If the portfolio is shared to both an external account and an organization node, and both shares need to be updated, you must invoke This API cannot be used for removing the portfolio share. You must use When you associate a principal with portfolio, a potential privilege escalation path may occur when that portfolio is then shared with other accounts. For a user in a recipient account who is not an Service Catalog Admin, but still has the ability to create Principals (Users/Groups/Roles), that user could create a role that matches a principal name association for the portfolio. 
Although this user may not know which principal names are associated through Service Catalog, they may be able to guess the user. If this potential escalation path is a concern, then Service Catalog recommends using The user to which the access level applies. The only supported value is The user to which the access level applies. The only supported value is The access level to use to filter results. The record identifier of the last request performed on this provisioned product of the following types: ProvisionedProduct UpdateProvisionedProduct ExecuteProvisionedProductPlan TerminateProvisionedProduct The record identifier of the last request performed on this provisioned product of the following types: ProvisionProduct UpdateProvisionedProduct ExecuteProvisionedProductPlan TerminateProvisionedProduct The record identifier of the last successful request performed on this provisioned product of the following types: ProvisionedProduct UpdateProvisionedProduct ExecuteProvisionedProductPlan TerminateProvisionedProduct The record identifier of the last successful request performed on this provisioned product of the following types: ProvisionProduct UpdateProvisionedProduct ExecuteProvisionedProductPlan TerminateProvisionedProduct The record identifier of the last request performed on this provisioned product of the following types: ProvisionedProduct UpdateProvisionedProduct ExecuteProvisionedProductPlan TerminateProvisionedProduct The record identifier of the last request performed on this provisioned product of the following types: ProvisionProduct UpdateProvisionedProduct ExecuteProvisionedProductPlan TerminateProvisionedProduct The record identifier of the last successful request performed on this provisioned product of the following types: ProvisionedProduct UpdateProvisionedProduct ExecuteProvisionedProductPlan TerminateProvisionedProduct The record identifier of the last successful request performed on this provisioned product of the following types: 
ProvisionProduct UpdateProvisionedProduct ExecuteProvisionedProductPlan TerminateProvisionedProduct The type of provisioning artifact. The type of provisioning artifact. The type of provisioning artifact. The type of provisioning artifact. If set to true, Service Catalog stops validating the specified provisioning artifact even if it is invalid. If set to true, Service Catalog stops validating the specified provisioning artifact even if it is invalid. Service Catalog does not support template validation for the Information about a provisioning artifact (also known as a version) for a product. The type of provisioned product. The supported values are The type of provisioned product. The supported values are The search filters. When the key is Example: The search filters. When the key is Example: This action creates a legal hold on a recovery point (backup). A legal hold is a restraint on altering or deleting a backup until an authorized user cancels the legal hold. Any actions to delete or disassociate a recovery point will fail with an error if one or more active legal holds are on the recovery point. This request creates a logical container where backups are stored. This request includes a name, optionally one or more resource tags, an encryption key, and a request ID. Do not include sensitive data, such as passport numbers, in the name of a backup vault. Returns an array of resources successfully backed up by Backup, including the time the resource was saved, an Amazon Resource Name (ARN) of the resource, and a resource type. This request lists the protected resources corresponding to each backup vault. The current state of a resource recovery point. The current state of a backup job. A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors. 
During the start window, the backup job status remains in A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors. This parameter has a maximum value of 100 years (52,560,000 minutes). During the start window, the backup job status remains in The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by resource table. Backup ignores this expression for other resource types. The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by resource table. Backup ignores this expression for other resource types. This parameter has a maximum value of 100 years (36,500 days). This is the name of the vault that is being created. 
These are the tags that will be included in the newly-created vault. This is the ID of the creation request. This setting specifies the minimum retention period that the vault retains its recovery points. If this parameter is not specified, no minimum retention period is enforced. If specified, any backup or copy job to the vault must have a lifecycle policy with a retention period equal to or longer than the minimum retention period. If a job retention period is shorter than that minimum retention period, then the vault fails the backup or copy job, and you should either modify your lifecycle settings or use a different vault. This is the setting that specifies the maximum retention period that the vault retains its recovery points. If this parameter is not specified, Backup does not enforce a maximum retention period on the recovery points in the vault (allowing indefinite storage). If specified, any backup or copy job to the vault must have a lifecycle policy with a retention period equal to or shorter than the maximum retention period. If the job retention period is longer than that maximum retention period, then the vault fails the backup or copy job, and you should either modify your lifecycle settings or use a different vault. The name of a logical container where backups are stored. Logically air-gapped backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. They consist of lowercase letters, numbers, and hyphens. This is the ARN (Amazon Resource Name) of the vault being created. The date and time when the vault was created. This value is in Unix format, Coordinated Universal Time (UTC), and accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. This is the current state of the vault. The current state of a resource recovery point. The current state of a backup job. The name of a logical container where backups are stored. 
Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens. This is the account ID of the specified backup vault. An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for example, This is the type of vault described. The server-side encryption key that is used to protect your backups; for example, An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, This is the account ID of the specified backup vault. An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, This is the account ID of the specified backup vault. This parameter will sort the list of vaults by vault type. This parameter will sort the list of vaults by shared vaults. The next item following a partial list of returned items. For example, if a request is made to return This is the list of protected resources by backup vault within the vault(s) you specify by name. This is the list of protected resources by backup vault within the vault(s) you specify by account ID. The next item following a partial list of returned items. For example, if a request is made to return The maximum number of items to be returned. These are the results returned for the request ListProtectedResourcesByBackupVault. The next item following a partial list of returned items. For example, if a request is made to return This parameter will sort the list of recovery points by account ID. The next item following a partial list of returned items. For example, if a request is made to return A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 hours. If this value is included, it must be at least 60 minutes to avoid errors. 
During the start window, the backup job status remains in A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 hours. If this value is included, it must be at least 60 minutes to avoid errors. This parameter has a maximum value of 100 years (52,560,000 minutes). During the start window, the backup job status remains in A value in minutes during which a successfully started backup must complete, or else Backup will cancel the job. This value is optional. This value begins counting down from when the backup was scheduled. It does not add additional time for A value in minutes during which a successfully started backup must complete, or else Backup will cancel the job. This value is optional. This value begins counting down from when the backup was scheduled. It does not add additional time for Like The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by resource table. Backup ignores this expression for other resource types. The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. 
Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold. Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by resource table. Backup ignores this expression for other resource types. This parameter has a maximum value of 100 years (36,500 days). Represents the input of a This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance: Replication group message: Cache cluster message: Replication group message: Cache cluster message: Cache cluster message: For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide. 
Async API to test connection between source and target replication group. The number of node groups you wish to add Total number of node groups you want The ID of the replication group to which data is to be migrated. List of endpoints from which data should be migrated. List should have only one element.
+ * By default, it is the same as {@link #minimumPartSizeInBytes(Long)}.
+ *
+ * @param thresholdInBytes the value of the threshold to set.
+ * @return an instance of this builder.
+ */
+ S3CrtAsyncClientBuilder thresholdInBytes(Long thresholdInBytes);
+
@Override
S3AsyncClient build();
}
\ No newline at end of file
diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java
index 09d0c95fbfb0..284748f163bd 100644
--- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java
+++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java
@@ -70,9 +70,10 @@ private DefaultS3CrtAsyncClient(DefaultS3CrtClientBuilder builder) {
super(initializeS3AsyncClient(builder));
long partSizeInBytes = builder.minimalPartSizeInBytes == null ? DEFAULT_PART_SIZE_IN_BYTES :
builder.minimalPartSizeInBytes;
+ long thresholdInBytes = builder.thresholdInBytes == null ? partSizeInBytes : builder.thresholdInBytes;
this.copyObjectHelper = new CopyObjectHelper((S3AsyncClient) delegate(),
partSizeInBytes,
- partSizeInBytes);
+ thresholdInBytes);
}
@Override
@@ -117,6 +118,7 @@ private static S3CrtAsyncHttpClient.Builder initializeS3CrtAsyncHttpClient(Defau
Validate.isPositiveOrNull(builder.maxConcurrency, "maxConcurrency");
Validate.isPositiveOrNull(builder.targetThroughputInGbps, "targetThroughputInGbps");
Validate.isPositiveOrNull(builder.minimalPartSizeInBytes, "minimalPartSizeInBytes");
+ Validate.isPositiveOrNull(builder.thresholdInBytes, "thresholdInBytes");
S3NativeClientConfiguration.Builder nativeClientBuilder =
S3NativeClientConfiguration.builder()
@@ -128,7 +130,8 @@ private static S3CrtAsyncHttpClient.Builder initializeS3CrtAsyncHttpClient(Defau
.endpointOverride(builder.endpointOverride)
.credentialsProvider(builder.credentialsProvider)
.readBufferSizeInBytes(builder.readBufferSizeInBytes)
- .httpConfiguration(builder.httpConfiguration);
+ .httpConfiguration(builder.httpConfiguration)
+ .thresholdInBytes(builder.thresholdInBytes);
if (builder.retryConfiguration != null) {
nativeClientBuilder.standardRetryOptions(
@@ -156,6 +159,7 @@ public static final class DefaultS3CrtClientBuilder implements S3CrtAsyncClientB
private ListINSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the participant’s token is not allowed to do. For more information about participant capabilities, see the capabilities field in CreateParticipantToken.INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the participant’s token is not allowed to do. For more information about participant capabilities, see the capabilities field in CreateParticipantToken. QUOTA_EXCEEDED indicates that the number of participants who want to publish/subscribe to a stage exceeds the quota; for more information, see Service Quotas. PUBLISHER_NOT_FOUND indicates that the participant tried to subscribe to a publisher that doesn’t exist.
topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.
"
+ "documentation":"
topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.
"
}
From c1e16607e3ba141d7a9e904d5ffc93954340c150 Mon Sep 17 00:00:00 2001
From: AWS <>
Date: Mon, 7 Aug 2023 18:12:30 +0000
Subject: [PATCH 119/270] Amazon Kinesis Video Streams Update: This release
enables minimum of Images SamplingInterval to be as low as 200 milliseconds
in Kinesis Video Stream Image feature.
---
...ure-AmazonKinesisVideoStreams-7b047ca.json | 6 +
.../codegen-resources/endpoint-rule-set.json | 344 ++++++++----------
.../codegen-resources/service-2.json | 8 +-
3 files changed, 162 insertions(+), 196 deletions(-)
create mode 100644 .changes/next-release/feature-AmazonKinesisVideoStreams-7b047ca.json
diff --git a/.changes/next-release/feature-AmazonKinesisVideoStreams-7b047ca.json b/.changes/next-release/feature-AmazonKinesisVideoStreams-7b047ca.json
new file mode 100644
index 000000000000..a248ac684350
--- /dev/null
+++ b/.changes/next-release/feature-AmazonKinesisVideoStreams-7b047ca.json
@@ -0,0 +1,6 @@
+{
+ "type": "feature",
+ "category": "Amazon Kinesis Video Streams",
+ "contributor": "",
+ "description": "This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature."
+}
diff --git a/services/kinesisvideo/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/kinesisvideo/src/main/resources/codegen-resources/endpoint-rule-set.json
index f61682a8f1fc..6da12c24ec73 100644
--- a/services/kinesisvideo/src/main/resources/codegen-resources/endpoint-rule-set.json
+++ b/services/kinesisvideo/src/main/resources/codegen-resources/endpoint-rule-set.json
@@ -58,52 +58,56 @@
"type": "error"
},
{
- "conditions": [],
- "type": "tree",
- "rules": [
+ "conditions": [
{
- "conditions": [
+ "fn": "booleanEquals",
+ "argv": [
{
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "error": "Invalid Configuration: Dualstack and custom endpoint are not supported",
- "type": "error"
- },
- {
- "conditions": [],
- "endpoint": {
- "url": {
- "ref": "Endpoint"
+ "ref": "UseDualStack"
},
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
+ true
+ ]
}
- ]
+ ],
+ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported",
+ "type": "error"
+ },
+ {
+ "conditions": [],
+ "endpoint": {
+ "url": {
+ "ref": "Endpoint"
+ },
+ "properties": {},
+ "headers": {}
+ },
+ "type": "endpoint"
}
]
},
{
- "conditions": [],
+ "conditions": [
+ {
+ "fn": "isSet",
+ "argv": [
+ {
+ "ref": "Region"
+ }
+ ]
+ }
+ ],
"type": "tree",
"rules": [
{
"conditions": [
{
- "fn": "isSet",
+ "fn": "aws.partition",
"argv": [
{
"ref": "Region"
}
- ]
+ ],
+ "assign": "PartitionResult"
}
],
"type": "tree",
@@ -111,13 +115,22 @@
{
"conditions": [
{
- "fn": "aws.partition",
+ "fn": "booleanEquals",
"argv": [
{
- "ref": "Region"
- }
- ],
- "assign": "PartitionResult"
+ "ref": "UseFIPS"
+ },
+ true
+ ]
+ },
+ {
+ "fn": "booleanEquals",
+ "argv": [
+ {
+ "ref": "UseDualStack"
+ },
+ true
+ ]
}
],
"type": "tree",
@@ -127,224 +140,175 @@
{
"fn": "booleanEquals",
"argv": [
+ true,
{
- "ref": "UseFIPS"
- },
- true
+ "fn": "getAttr",
+ "argv": [
+ {
+ "ref": "PartitionResult"
+ },
+ "supportsFIPS"
+ ]
+ }
]
},
{
"fn": "booleanEquals",
"argv": [
+ true,
{
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsFIPS"
- ]
- }
- ]
- },
- {
- "fn": "booleanEquals",
+ "fn": "getAttr",
"argv": [
- true,
{
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsDualStack"
- ]
- }
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
+ "ref": "PartitionResult"
+ },
+ "supportsDualStack"
]
}
]
- },
+ }
+ ],
+ "type": "tree",
+ "rules": [
{
"conditions": [],
- "error": "FIPS and DualStack are enabled, but this partition does not support one or both",
- "type": "error"
+ "endpoint": {
+ "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",
+ "properties": {},
+ "headers": {}
+ },
+ "type": "endpoint"
}
]
},
+ {
+ "conditions": [],
+ "error": "FIPS and DualStack are enabled, but this partition does not support one or both",
+ "type": "error"
+ }
+ ]
+ },
+ {
+ "conditions": [
+ {
+ "fn": "booleanEquals",
+ "argv": [
+ {
+ "ref": "UseFIPS"
+ },
+ true
+ ]
+ }
+ ],
+ "type": "tree",
+ "rules": [
{
"conditions": [
{
"fn": "booleanEquals",
"argv": [
+ true,
{
- "ref": "UseFIPS"
- },
- true
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
+ "fn": "getAttr",
"argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsFIPS"
- ]
- }
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "type": "tree",
- "rules": [
{
- "conditions": [],
- "endpoint": {
- "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
+ "ref": "PartitionResult"
+ },
+ "supportsFIPS"
]
}
]
- },
+ }
+ ],
+ "type": "tree",
+ "rules": [
{
"conditions": [],
- "error": "FIPS is enabled but this partition does not support FIPS",
- "type": "error"
+ "endpoint": {
+ "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dnsSuffix}",
+ "properties": {},
+ "headers": {}
+ },
+ "type": "endpoint"
}
]
},
+ {
+ "conditions": [],
+ "error": "FIPS is enabled but this partition does not support FIPS",
+ "type": "error"
+ }
+ ]
+ },
+ {
+ "conditions": [
+ {
+ "fn": "booleanEquals",
+ "argv": [
+ {
+ "ref": "UseDualStack"
+ },
+ true
+ ]
+ }
+ ],
+ "type": "tree",
+ "rules": [
{
"conditions": [
{
"fn": "booleanEquals",
"argv": [
+ true,
{
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
+ "fn": "getAttr",
"argv": [
- true,
{
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsDualStack"
- ]
- }
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://kinesisvideo.{Region}.{PartitionResult#dualStackDnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
+ "ref": "PartitionResult"
+ },
+ "supportsDualStack"
]
}
]
- },
- {
- "conditions": [],
- "error": "DualStack is enabled but this partition does not support DualStack",
- "type": "error"
}
- ]
- },
- {
- "conditions": [],
+ ],
"type": "tree",
"rules": [
{
"conditions": [],
"endpoint": {
- "url": "https://kinesisvideo.{Region}.{PartitionResult#dnsSuffix}",
+ "url": "https://kinesisvideo.{Region}.{PartitionResult#dualStackDnsSuffix}",
"properties": {},
"headers": {}
},
"type": "endpoint"
}
]
+ },
+ {
+ "conditions": [],
+ "error": "DualStack is enabled but this partition does not support DualStack",
+ "type": "error"
}
]
+ },
+ {
+ "conditions": [],
+ "endpoint": {
+ "url": "https://kinesisvideo.{Region}.{PartitionResult#dnsSuffix}",
+ "properties": {},
+ "headers": {}
+ },
+ "type": "endpoint"
}
]
- },
- {
- "conditions": [],
- "error": "Invalid Configuration: Missing Region",
- "type": "error"
}
]
+ },
+ {
+ "conditions": [],
+ "error": "Invalid Configuration: Missing Region",
+ "type": "error"
}
]
}
\ No newline at end of file
diff --git a/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json b/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json
index 489bea376b0f..2f761ae09479 100644
--- a/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json
+++ b/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json
@@ -462,7 +462,7 @@
{"shape":"AccessDeniedException"},
{"shape":"NoDataRetentionException"}
],
- "documentation":"SignalingChannel to a stream to store the media. There are two signaling modes that can specified :
"
+ "documentation":"StorageStatus is disabled, no data will be stored, and the StreamARN parameter will not be needed. StorageStatus is enabled, the data will be stored in the StreamARN provided. SignalingChannel to a stream to store the media. There are two signaling modes that can specified :
StorageStatus is disabled, no data will be stored, and the StreamARN parameter will not be needed. StorageStatus is enabled, the data will be stored in the StreamARN provided. StorageStatus is enabled, direct peer-to-peer (master-viewer) connections no longer occur. Peers connect directly to the storage session. You must call the JoinStorageSession API to trigger an SDP offer send and establish a connection between a peer and the storage session. train for the value of DatasetType. To create the test dataset for a project, specify test for the value of DatasetType. CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. errors lists in the JSON Lines.Status = CREATE_FAILED). Currently, you can't access the terminal error information. rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.TRAIN for the value of DatasetType. To create the test dataset for a project, specify TEST for the value of DatasetType. CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. errors lists in the JSON Lines.Status = CREATE_FAILED). Currently, you can't access the terminal error information. rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. 
You can use AuditImagesLimit to limit the number of audit images returned. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video. SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. If no Amazon S3 bucket is defined, raw bytes are sent instead. AuditImagesLimit to limit the number of audit images returned when GetFaceLivenessSessionResults is called. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video. GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color.GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image.MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels.
{Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} rekognition:DetectLabels action. GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color.GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image.MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. These arguments are only valid when supplying GENERAL_LABELS as a feature type.
{Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} rekognition:DetectLabels action. JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection.GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected. MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection.GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected. MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. 
To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.GetFaceDetection operation, the returned values for FaceOccluded and EyeDirection will always be \"null\".sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. The reference image can optionally be returned.sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. GetFaceLivenessSessionResults is defined by the AuditImagesLimit paramater when calling CreateFaceLivenessSession. Reference images are always returned when possible.JobId) When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartTextDetection. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetTextDetection and pass the job identifier (JobId) from the initial call of StartLabelDetection.GetTextDetection returns an array of detected text (TextDetections) sorted by the time the text was detected, up to 50 words per frame of video.MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. 
To get the next page of results, call GetTextDetection and populate the NextToken request parameter with the token value returned from the previous call to GetTextDetection.JobId) When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartTextDetection. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetTextDetection and pass the job identifier (JobId) from the initial call of StartLabelDetection.GetTextDetection returns an array of detected text (TextDetections) sorted by the time the text was detected, up to 100 words per frame of video.MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetTextDetection and populate the NextToken request parameter with the token value returned from the previous call to GetTextDetection.train to create a training dataset. Specify test to create a test dataset. TRAIN to create a training dataset. Specify TEST to create a test dataset. DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using [\"DEFAULT\", \"FACE_OCCLUDED\"] or just [\"FACE_OCCLUDED\"]. You can request for all facial attributes by using [\"ALL\"]. Requesting more attributes may increase response time.[\"ALL\", \"DEFAULT\"], the service uses a logical \"AND\" operator to determine which attributes to return (in this case, all attributes). DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. 
You can request for specific facial attributes (in addition to the default list) - by using [\"DEFAULT\", \"FACE_OCCLUDED\"] or just [\"FACE_OCCLUDED\"]. You can request for all facial attributes by using [\"ALL\"]. Requesting more attributes may increase response time.[\"ALL\", \"DEFAULT\"], the service uses a logical \"AND\" operator to determine which attributes to return (in this case, all attributes). DetectFaces, they aren't supported when analyzing videos with StartFaceDetection and GetFaceDetection.MinConfidence is not specified, the operation returns labels with a confidence values greater than or equal to 55 percent.MinConfidence is not specified, the operation returns labels with a confidence values greater than or equal to 55 percent. Only valid when GENERAL_LABELS is specified as a feature type in the Feature input parameter.GetDataEndpoint, specifying GET_CLIP for the APIName parameter.
V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).GetClip.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for outgoing AWS data apply.GetDataEndpoint, specifying GET_CLIP for the APIName parameter.
V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).GetClip.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and Amazon Web Services Pricing. Charges for outgoing Amazon Web Services data apply.StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.
V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).
GET_DASH_STREAMING_SESSION_URL for the APIName parameter.GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).
fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps. GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.
V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).
GET_DASH_STREAMING_SESSION_URL for the APIName parameter.GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).
fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps. GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and Amazon Web Services Pricing. Charges for both HLS sessions and outgoing Amazon Web Services data apply.
x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you want to report an issue to Amazon Web Services the support team can better diagnose the problem if given the Request Id.StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.
V_MPEG/ISO/AVC (for h.264) or V_MPEG/ISO/HEVC (for h.265). Optionally, the codec ID of track 2 should be A_AAC.
GET_HLS_STREAMING_SESSION_URL for the APIName parameter.GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).
GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track. fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps. ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.
V_MPEG/ISO/AVC (for H.264) or V_MPEG/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC. For audio only streaming, the codec ID of track 1 should be A_AAC.
GET_HLS_STREAMING_SESSION_URL for the APIName parameter.GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).
GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track. fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps. ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and Amazon Web Services Pricing. Charges for both HLS sessions and outgoing Amazon Web Services data apply.
x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you want to report an issue to Amazon Web Services, the support team can better diagnose the problem if given the Request Id.GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.
x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.
x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you want to report an issue to Amazon Web Services, the support team can better diagnose the problem if given the Request Id.ListFragments. However, results are typically available in less than one second.GetDataEndpoint API to get an endpoint. Then send the ListFragments requests to this endpoint using the --endpoint-url parameter.
x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.ListFragments. However, results are typically available in less than one second.GetDataEndpoint API to get an endpoint. Then send the ListFragments requests to this endpoint using the --endpoint-url parameter.
x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides. x-amz-RequestId HTTP header – if you want to report an issue to Amazon Web Services, the support team can better diagnose the problem if given the Request Id.StartTimestamp and EndTimestamp is more than 300 seconds above StartTimestamp, you will receive an IllegalArgumentException.startTimestamp will be returned if available. startTimestamp will be returned if available. MaxResults greater than this value will result in a page size of 25. Any additional results will be paginated. GetMediaForFragmentList call also include the following additional Matroska (MKV) tags:
"
+ "documentation":"GetMediaForFragmentList call also include the following additional Matroska (MKV) tags:
"
}
},
"payload":"Payload"
@@ -818,15 +817,11 @@
"members":{
"Message":{"shape":"ErrorMessage"}
},
- "documentation":"GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAYis requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.GetImages will throw this error when Kinesis Video Streams can't find the stream that you specified.GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAYis requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.CREATE_COMPLETE, UPDATE_COMPLETE, UPDATE_ROLLBACK_COMPLETE, IMPORT_COMPLETE, and IMPORT_ROLLBACK_COMPLETE. UpdateProvisionedProduct for the provisioned product. cloudformation:GetTemplate and cloudformation:DescribeStacks IAM policy permissions. CREATE_COMPLETE, UPDATE_COMPLETE, UPDATE_ROLLBACK_COMPLETE, IMPORT_COMPLETE, and IMPORT_ROLLBACK_COMPLETE. UpdateProvisionedProduct for the provisioned product. cloudformation:GetTemplate and cloudformation:DescribeStacks IAM policy permissions. IMPORT_COMPLETE status before you import another. TagOptions sharing or Principal sharing for an existing portfolio share. CreatePortfolioShare operation is IN_PROGRESS, as the share is not available to recipient entities. In this case, you must wait for the portfolio share to be COMPLETED.accountId or organization node in the input, but not both.UpdatePortfolioShare separately for each share type. DeletePortfolioShare API for that action. PrincipalType as IAM. With this configuration, the PrincipalARN must already exist in the recipient account before it can be associated. 
TagOptions sharing or Principal sharing for an existing portfolio share. CreatePortfolioShare operation is IN_PROGRESS, as the share is not available to recipient entities. In this case, you must wait for the portfolio share to be completed.accountId or organization node in the input, but not both.UpdatePortfolioShare separately for each share type. DeletePortfolioShare API for that action. PrincipalType as IAM. With this configuration, the PrincipalARN must already exist in the recipient account before it can be associated. Self.self.
"
+ "documentation":"
"
},
"LastSuccessfulProvisioningRecordId":{
"shape":"Id",
- "documentation":"
"
+ "documentation":"
"
},
"Tags":{
"shape":"Tags",
@@ -4949,11 +4950,11 @@
},
"LastProvisioningRecordId":{
"shape":"Id",
- "documentation":"
"
+ "documentation":"
"
},
"LastSuccessfulProvisioningRecordId":{
"shape":"Id",
- "documentation":"
"
+ "documentation":"
"
},
"ProductId":{
"shape":"Id",
@@ -5185,7 +5186,7 @@
},
"Type":{
"shape":"ProvisioningArtifactType",
- "documentation":"
"
+ "documentation":"CLOUD_FORMATION_TEMPLATE - CloudFormation templateMARKETPLACE_AMI - Amazon Web Services Marketplace AMIMARKETPLACE_CAR - Amazon Web Services Marketplace Clusters and Amazon Web Services ResourcesCLOUD_FORMATION_TEMPLATE - CloudFormation template
"
+ "documentation":"CLOUD_FORMATION_TEMPLATE - CloudFormation templateMARKETPLACE_AMI - Amazon Web Services Marketplace AMIMARKETPLACE_CAR - Amazon Web Services Marketplace Clusters and Amazon Web Services ResourcesTERRAFORM_OPEN_SOURCE - Terraform open source configuration file
"
},
"DisableTemplateValidation":{
"shape":"DisableTemplateValidation",
- "documentation":"CLOUD_FORMATION_TEMPLATE - CloudFormation templateTERRAFORM_OPEN_SOURCE - Terraform open source configuration fileTERRAFORM_OS product type. CFN_STACK and CFN_STACKSET.CFN_STACK, CFN_STACKSET, TERRAFORM_OPEN_SOURCE, and TERRAFORM_CLOUD.SearchQuery, the searchable fields are arn, createdTime, id, lastRecordId, idempotencyToken, name, physicalId, productId, provisioningArtifact, type, status, tags, userArn, userArnSession, lastProvisioningRecordId, lastSuccessfulProvisioningRecordId, productName, and provisioningArtifactName.\"SearchQuery\":[\"status:AVAILABLE\"] SearchQuery, the searchable fields are arn, createdTime, id, lastRecordId, idempotencyToken, name, physicalId, productId, provisioningArtifactId, type, status, tags, userArn, userArnSession, lastProvisioningRecordId, lastSuccessfulProvisioningRecordId, productName, and provisioningArtifactName.\"SearchQuery\":[\"status:AVAILABLE\"] CREATED status until it has successfully begun or until the start window time has run out. If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).CREATED status until it has successfully begun or until the start window time has run out. 
If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.CREATED status until it has successfully begun or until the start window time has run out. If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).CREATED status until it has successfully begun or until the start window time has run out. 
If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).StartWindowMinutes, or if the backup started later than scheduled.StartWindowMinutes, or if the backup started later than scheduled.StartWindowMinutes, this parameter has a maximum value of 100 years (52,560,000 minutes).TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).
Test Failover API called for node group <node-group-id> Failover from primary node <primary-node-id> to replica node <node-id> completed Failover from primary node <primary-node-id> to replica node <node-id> completed Recovering cache nodes <node-id> Finished recovery for cache nodes <node-id>