From 708db7e3df770cca548341a7f28fd6d276f6c4b3 Mon Sep 17 00:00:00 2001 From: David Ho <70000000+davidh44@users.noreply.github.com> Date: Tue, 13 Jun 2023 21:48:38 -0700 Subject: [PATCH 01/17] Adding codegen updates and configuration options (#4085) * Adding configuration options * Adding codegen updates * Adding javadocs * Fix CheckStyle/SpotBugs errors, and add RequestCompression interceptor trait and SdkInternalExecutionAttribute * RequestCompression interceptor trait * Refactoring, adding profile config and javadocs * Refactoring and add codegen tests * Add codegen tests --- .../amazon/awssdk/codegen/AddOperations.java | 1 + .../compression/RequestCompression.java | 36 ++ .../model/intermediate/OperationModel.java | 11 + .../codegen/model/service/Operation.java | 11 + .../poet/client/specs/JsonProtocolSpec.java | 5 +- .../poet/client/specs/QueryProtocolSpec.java | 7 +- .../poet/client/specs/XmlProtocolSpec.java | 7 +- .../traits/RequestCompressionTrait.java | 59 ++ .../poet/client/c2j/json/service-2.json | 10 + .../poet/client/c2j/query/service-2.json | 10 + .../poet/client/c2j/rest-json/service-2.json | 10 + .../poet/client/c2j/xml/service-2.json | 10 + .../test-abstract-async-client-class.java | 28 + .../test-abstract-sync-client-class.java | 512 +++++++++++++++++- .../test-aws-json-async-client-class.java | 67 +++ .../client/test-json-async-client-class.java | 67 +++ .../test-json-async-client-interface.java | 59 ++ .../poet/client/test-json-client-class.java | 58 ++ .../client/test-json-client-interface.java | 53 ++ .../client/test-query-async-client-class.java | 64 +++ .../poet/client/test-query-client-class.java | 55 ++ .../client/test-xml-async-client-class.java | 63 +++ .../poet/client/test-xml-client-class.java | 53 ++ .../awssdk/profiles/ProfileProperty.java | 12 + .../core/RequestCompressionConfiguration.java | 139 +++++ .../core/RequestOverrideConfiguration.java | 63 ++- .../amazon/awssdk/core/SdkSystemSetting.java | 12 + 
.../builder/SdkDefaultClientBuilder.java | 3 + .../config/ClientOverrideConfiguration.java | 70 ++- .../core/client/config/SdkClientOption.java | 7 + .../interceptor/SdkExecutionAttribute.java | 7 + .../SdkInternalExecutionAttribute.java | 7 + .../interceptor/trait/RequestCompression.java | 93 ++++ 33 files changed, 1625 insertions(+), 44 deletions(-) create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/RequestCompression.java diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java index 2db612a5f6d..79bb81470f5 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java @@ -165,6 +165,7 @@ public Map constructOperations() { operationModel.setEndpointTrait(op.getEndpoint()); operationModel.setHttpChecksumRequired(op.isHttpChecksumRequired()); operationModel.setHttpChecksum(op.getHttpChecksum()); + operationModel.setRequestCompression(op.getRequestCompression()); operationModel.setStaticContextParams(op.getStaticContextParams()); Input input = op.getInput(); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java b/codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java new file mode 100644 index 00000000000..69d53bc7e30 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/compression/RequestCompression.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.compression; + +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Class to map the RequestCompression trait of an operation. + */ +@SdkInternalApi +public class RequestCompression { + + private List encodings; + + public List getEncodings() { + return encodings; + } + + public void setEncodings(List encodings) { + this.encodings = encodings; + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java index 11dbe6794b8..1ff19719112 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.codegen.checksum.HttpChecksum; +import software.amazon.awssdk.codegen.compression.RequestCompression; import software.amazon.awssdk.codegen.docs.ClientType; import software.amazon.awssdk.codegen.docs.DocConfiguration; import software.amazon.awssdk.codegen.docs.OperationDocs; @@ -71,6 +72,8 @@ public class OperationModel extends DocumentationModel { private HttpChecksum httpChecksum; + private RequestCompression requestCompression; + @JsonIgnore private Map staticContextParams; @@ -309,6 +312,14 @@ 
public void setHttpChecksum(HttpChecksum httpChecksum) { this.httpChecksum = httpChecksum; } + public RequestCompression getRequestCompression() { + return requestCompression; + } + + public void setRequestCompression(RequestCompression requestCompression) { + this.requestCompression = requestCompression; + } + public Map getStaticContextParams() { return staticContextParams; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java index 4f1d573b013..e8a6826c17a 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/Operation.java @@ -18,6 +18,7 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.codegen.checksum.HttpChecksum; +import software.amazon.awssdk.codegen.compression.RequestCompression; import software.amazon.awssdk.codegen.model.intermediate.EndpointDiscovery; public class Operation { @@ -52,6 +53,8 @@ public class Operation { private HttpChecksum httpChecksum; + private RequestCompression requestCompression; + private Map staticContextParams; public String getName() { @@ -189,6 +192,14 @@ public void setHttpChecksum(HttpChecksum httpChecksum) { this.httpChecksum = httpChecksum; } + public RequestCompression getRequestCompression() { + return requestCompression; + } + + public void setRequestCompression(RequestCompression requestCompression) { + this.requestCompression = requestCompression; + } + public Map getStaticContextParams() { return staticContextParams; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java index 41361004b80..9db5ee5c7c6 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java 
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java @@ -42,6 +42,7 @@ import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumTrait; import software.amazon.awssdk.codegen.poet.client.traits.NoneAuthTypeRequestTrait; +import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait; import software.amazon.awssdk.codegen.poet.eventstream.EventStreamUtils; import software.amazon.awssdk.codegen.poet.model.EventStreamSpecHelper; import software.amazon.awssdk.core.SdkPojoBuilder; @@ -187,7 +188,8 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(".withMetricCollector(apiCallMetricCollector)") .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel)); if (opModel.hasStreamingInput()) { codeBlock.add(".withRequestBody(requestBody)") @@ -257,6 +259,7 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel)) .add(".withInput($L)$L);", opModel.getInput().getVariableName(), asyncResponseTransformerVariable(isStreaming, isRestJson, opModel)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java index 74e15930c87..faacdefcd94 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java @@ -31,6 +31,7 
@@ import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumTrait; import software.amazon.awssdk.codegen.poet.client.traits.NoneAuthTypeRequestTrait; +import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; @@ -116,7 +117,8 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(".withMetricCollector(apiCallMetricCollector)") .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel)); if (opModel.hasStreamingInput()) { @@ -151,7 +153,8 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(".withMetricCollector(apiCallMetricCollector)\n") .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel)); builder.add(hostPrefixExpression(opModel) + asyncRequestBody + ".withInput($L)$L);", diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java index 59769ff51d4..6374d8accc3 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java @@ -37,6 +37,7 @@ import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; import 
software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumTrait; import software.amazon.awssdk.codegen.poet.client.traits.NoneAuthTypeRequestTrait; +import software.amazon.awssdk.codegen.poet.client.traits.RequestCompressionTrait; import software.amazon.awssdk.codegen.poet.eventstream.EventStreamUtils; import software.amazon.awssdk.codegen.poet.model.EventStreamSpecHelper; import software.amazon.awssdk.core.SdkPojoBuilder; @@ -135,7 +136,8 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(".withInput($L)", opModel.getInput().getVariableName()) .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel)); s3ArnableFields(opModel, model).ifPresent(codeBlock::add); @@ -213,7 +215,8 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(asyncRequestBody(opModel)) .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) - .add(NoneAuthTypeRequestTrait.create(opModel)); + .add(NoneAuthTypeRequestTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel)); s3ArnableFields(opModel, model).ifPresent(builder::add); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java new file mode 100644 index 00000000000..341feeb0f50 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java @@ -0,0 +1,59 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.client.traits; + +import com.squareup.javapoet.CodeBlock; +import java.util.List; +import java.util.stream.Collectors; +import software.amazon.awssdk.codegen.model.intermediate.OperationModel; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.client.handler.ClientExecutionParams; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; + +/** + * The logic for handling the "requestCompression" trait within the code generator. + */ +public class RequestCompressionTrait { + + private RequestCompressionTrait() { + } + + /** + * Generate a ".putExecutionAttribute(...)" code-block for the provided operation model. This should be used within the + * context of initializing {@link ClientExecutionParams}. If request compression is not required by the operation, this will + * return an empty code-block. 
+ */ + public static CodeBlock create(OperationModel operationModel) { + if (operationModel.getRequestCompression() == null) { + return CodeBlock.of(""); + } + + List encodings = operationModel.getRequestCompression().getEncodings(); + + return CodeBlock.builder() + .add(CodeBlock.of(".putExecutionAttribute($T.REQUEST_COMPRESSION, " + + "$T.builder().encodings($L).isStreaming($L).build())", + SdkInternalExecutionAttribute.class, RequestCompression.class, + encodings.stream().collect(Collectors.joining("\", \"", "\"", "\"")), + operationModel.hasStreamingInput())) + .add(CodeBlock.of(".putExecutionAttribute($T.REQUEST_COMPRESSION_CONFIGURATION," + + "clientConfiguration.option($T.REQUEST_COMPRESSION_CONFIGURATION))", + SdkExecutionAttribute.class, SdkClientOption.class)) + .build(); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json index 05f73f8e606..65d93100198 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/service-2.json @@ -30,6 +30,16 @@ }, "authtype": "none" }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json index 5827a53a9a2..a3c379d189d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/service-2.json @@ -59,6 +59,16 @@ }, 
"authtype": "none" }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json index 66597cd7bd1..f003ba7d1e6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/service-2.json @@ -22,6 +22,16 @@ }, "httpChecksumRequired": true }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json index 267a48381fc..451eb30d1e2 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/xml/service-2.json @@ -29,6 +29,16 @@ }, "authtype": "none" }, + "OperationWithRequestCompression": { + "name": "APostOperation", + "http": { + "method": "POST", + "requestUri": "/" + }, + "requestCompression": { + "encodings": ["gzip"] + } + }, "APostOperation": { "name": "APostOperation", "http": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java index 783d45793ec..01c82c06b48 
100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java @@ -29,6 +29,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -305,6 +307,32 @@ public CompletableFuture operationWithChe return invokeOperation(operationWithChecksumRequiredRequest, request -> delegate.operationWithChecksumRequired(request)); } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following + * exceptions. + *
    + *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). + * Can be used for catch all scenarios.
  • + *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get + * credentials, etc.
  • + *
  • JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance + * of this type.
  • + *
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + return delegate.operationWithRequestCompression(operationWithRequestCompressionRequest); + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java index cc067f5eab5..c6a6ccd1328 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java @@ -1,9 +1,11 @@ package software.amazon.awssdk.services.json; -import java.util.function.Function; +import java.nio.file.Path; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.awscore.exception.AwsServiceException; +import software.amazon.awssdk.core.ResponseBytes; +import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.SdkClient; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.sync.RequestBody; @@ -20,9 +22,10 @@ import software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersResponse; import software.amazon.awssdk.services.json.model.InvalidInputException; import software.amazon.awssdk.services.json.model.JsonException; -import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import 
software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -35,6 +38,8 @@ import software.amazon.awssdk.services.json.model.StreamingInputOutputOperationResponse; import software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest; import software.amazon.awssdk.services.json.model.StreamingOutputOperationResponse; +import software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable; +import software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable; import software.amazon.awssdk.utils.Validate; @Generated("software.amazon.awssdk:codegen") @@ -66,11 +71,14 @@ public DelegatingJsonClient(JsonClient delegate) { * @sample JsonClient.APostOperation * @see AWS * API Documentation + * + * @deprecated This API is deprecated, use something else */ @Override + @Deprecated public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, AwsServiceException, SdkClientException, JsonException { - return invokeOperation(aPostOperationRequest, request -> delegate.aPostOperation(request)); + return delegate.aPostOperation(aPostOperationRequest); } /** @@ -97,7 +105,7 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio public APostOperationWithOutputResponse aPostOperationWithOutput( APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, SdkClientException, JsonException { - return invokeOperation(aPostOperationWithOutputRequest, request -> 
delegate.aPostOperationWithOutput(request)); + return delegate.aPostOperationWithOutput(aPostOperationWithOutputRequest); } /** @@ -119,7 +127,7 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) throws AwsServiceException, SdkClientException, JsonException { - return invokeOperation(bearerAuthOperationRequest, request -> delegate.bearerAuthOperation(request)); + return delegate.bearerAuthOperation(bearerAuthOperationRequest); } /** @@ -142,7 +150,7 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques public GetOperationWithChecksumResponse getOperationWithChecksum( GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, JsonException { - return invokeOperation(getOperationWithChecksumRequest, request -> delegate.getOperationWithChecksum(request)); + return delegate.getOperationWithChecksum(getOperationWithChecksumRequest); } /** @@ -169,7 +177,7 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, SdkClientException, JsonException { - return invokeOperation(getWithoutRequiredMembersRequest, request -> delegate.getWithoutRequiredMembers(request)); + return delegate.getWithoutRequiredMembers(getWithoutRequiredMembersRequest); } /** @@ -192,7 +200,52 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( public OperationWithChecksumRequiredResponse operationWithChecksumRequired( OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, SdkClientException, JsonException { - return invokeOperation(operationWithChecksumRequiredRequest, request -> 
delegate.operationWithChecksumRequired(request)); + return delegate.operationWithChecksumRequired(operationWithChecksumRequiredRequest); + } + + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { + return delegate.operationWithRequestCompression(operationWithRequestCompressionRequest); + } + + /** + * Some paginated operation with result_key in paginators.json file + * + * @return Result of the PaginatedOperationWithResultKey operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample JsonClient.PaginatedOperationWithResultKey + * @see #paginatedOperationWithResultKey(PaginatedOperationWithResultKeyRequest) + * @see AWS API Documentation + */ + @Override + public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey() throws AwsServiceException, + SdkClientException, JsonException { + return paginatedOperationWithResultKey(PaginatedOperationWithResultKeyRequest.builder().build()); } /** @@ -215,8 +268,162 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, SdkClientException, JsonException { - return invokeOperation(paginatedOperationWithResultKeyRequest, - request -> delegate.paginatedOperationWithResultKey(request)); + return delegate.paginatedOperationWithResultKey(paginatedOperationWithResultKeyRequest); + } + + /** + * Some paginated operation with result_key in paginators.json file
+ *

+ * This is a variant of + * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} + * operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will + * internally handle making service calls for you. + *

+ *

+ * When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no + * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response + * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your + * request, you will see the failures only after you start iterating through the iterable. + *

+ * + *

+ * The following are few ways to iterate through the response pages: + *

+ * 1) Using a Stream + * + *
+     * {@code
+     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
+     * responses.stream().forEach(....);
+     * }
+     * 
+ * + * 2) Using For loop + * + *
+     * {
+     *     @code
+     *     software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client
+     *             .paginatedOperationWithResultKeyPaginator(request);
+     *     for (software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse response : responses) {
+     *         // do something;
+     *     }
+     * }
+     * 
+ * + * 3) Use iterator directly + * + *
+     * {@code
+     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
+     * responses.iterator().forEachRemaining(....);
+     * }
+     * 
+ *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

+ * Note: If you prefer to have control on service calls, use the + * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} + * operation. + *

+ * + * @return A custom iterable that can be used to iterate through all the response pages. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.PaginatedOperationWithResultKey + * @see #paginatedOperationWithResultKeyPaginator(PaginatedOperationWithResultKeyRequest) + * @see AWS API Documentation + */ + @Override + public PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPaginator() throws AwsServiceException, + SdkClientException, JsonException { + return paginatedOperationWithResultKeyPaginator(PaginatedOperationWithResultKeyRequest.builder().build()); + } + + /** + * Some paginated operation with result_key in paginators.json file
+ *

+ * This is a variant of + * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} + * operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will + * internally handle making service calls for you. + *

+ *

+ * When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no + * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response + * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your + * request, you will see the failures only after you start iterating through the iterable. + *

+ * + *

+ * The following are few ways to iterate through the response pages: + *

+ * 1) Using a Stream + * + *
+     * {@code
+     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
+     * responses.stream().forEach(....);
+     * }
+     * 
+ * + * 2) Using For loop + * + *
+     * {
+     *     @code
+     *     software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client
+     *             .paginatedOperationWithResultKeyPaginator(request);
+     *     for (software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse response : responses) {
+     *         // do something;
+     *     }
+     * }
+     * 
+ * + * 3) Use iterator directly + * + *
+     * {@code
+     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
+     * responses.iterator().forEachRemaining(....);
+     * }
+     * 
+ *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

+ * Note: If you prefer to have control on service calls, use the + * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} + * operation. + *

+ * + * @param paginatedOperationWithResultKeyRequest + * @return A custom iterable that can be used to iterate through all the response pages. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.PaginatedOperationWithResultKey + * @see AWS API Documentation + */ + @Override + public PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPaginator( + PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { + return delegate.paginatedOperationWithResultKeyPaginator(paginatedOperationWithResultKeyRequest); } /** @@ -239,8 +446,85 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, SdkClientException, JsonException { - return invokeOperation(paginatedOperationWithoutResultKeyRequest, - request -> delegate.paginatedOperationWithoutResultKey(request)); + return delegate.paginatedOperationWithoutResultKey(paginatedOperationWithoutResultKeyRequest); + } + + /** + * Some paginated operation without result_key in paginators.json file
+ *

+ * This is a variant of + * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} + * operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will + * internally handle making service calls for you. + *

+ *

+ * When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no + * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response + * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your + * request, you will see the failures only after you start iterating through the iterable. + *

+ * + *

+ * The following are few ways to iterate through the response pages: + *

+ * 1) Using a Stream + * + *
+     * {@code
+     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable responses = client.paginatedOperationWithoutResultKeyPaginator(request);
+     * responses.stream().forEach(....);
+     * }
+     * 
+ * + * 2) Using For loop + * + *
+     * {
+     *     @code
+     *     software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable responses = client
+     *             .paginatedOperationWithoutResultKeyPaginator(request);
+     *     for (software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyResponse response : responses) {
+     *         // do something;
+     *     }
+     * }
+     * 
+ * + * 3) Use iterator directly + * + *
+     * {@code
+     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable responses = client.paginatedOperationWithoutResultKeyPaginator(request);
+     * responses.iterator().forEachRemaining(....);
+     * }
+     * 
+ *

+ * Please notice that the configuration of MaxResults won't limit the number of results you get with the + * paginator. It only limits the number of results in each page. + *

+ *

+ * Note: If you prefer to have control on service calls, use the + * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} + * operation. + *

+ * + * @param paginatedOperationWithoutResultKeyRequest + * @return A custom iterable that can be used to iterate through all the response pages. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.PaginatedOperationWithoutResultKey + * @see AWS API Documentation + */ + @Override + public PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResultKeyPaginator( + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { + return delegate.paginatedOperationWithoutResultKeyPaginator(paginatedOperationWithoutResultKeyRequest); } /** @@ -289,8 +573,50 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, RequestBody requestBody, ResponseTransformer responseTransformer) throws AwsServiceException, SdkClientException, JsonException { - return invokeOperation(putOperationWithChecksumRequest, - request -> delegate.putOperationWithChecksum(request, requestBody, responseTransformer)); + return delegate.putOperationWithChecksum(putOperationWithChecksumRequest, requestBody, responseTransformer); + } + + /** + * Invokes the PutOperationWithChecksum operation. + * + * @param putOperationWithChecksumRequest + * @param sourcePath + * {@link Path} to file containing data to send to the service. File will be read entirely and may be read + * multiple times in the event of a retry. 
If the file does not exist or the current user does not have + * access to read it then an exception will be thrown. The service documentation for the request content is + * as follows ' + *

+ * Object data. + *

+ * ' + * @param destinationPath + * {@link Path} to file that response contents will be written to. The file must not exist or this method + * will throw an exception. If the file is not writable by the current user then an exception will be thrown. + * The service documentation for the response content is as follows ' + *

+ * Object data. + *

+ * '. + * @return The transformed result of the ResponseTransformer. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.PutOperationWithChecksum + * @see #putOperationWithChecksum(PutOperationWithChecksumRequest, RequestBody) + * @see #putOperationWithChecksum(PutOperationWithChecksumRequest, ResponseTransformer) + * @see AWS API Documentation + */ + @Override + public PutOperationWithChecksumResponse putOperationWithChecksum( + PutOperationWithChecksumRequest putOperationWithChecksumRequest, Path sourcePath, Path destinationPath) + throws AwsServiceException, SdkClientException, JsonException { + return putOperationWithChecksum(putOperationWithChecksumRequest, RequestBody.fromFile(sourcePath), + ResponseTransformer.toFile(destinationPath)); } /** @@ -323,7 +649,35 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { - return invokeOperation(streamingInputOperationRequest, request -> delegate.streamingInputOperation(request, requestBody)); + return delegate.streamingInputOperation(streamingInputOperationRequest, requestBody); + } + + /** + * Some operation with a streaming input + * + * @param streamingInputOperationRequest + * @param sourcePath + * {@link Path} to file containing data to send to the service. File will be read entirely and may be read + * multiple times in the event of a retry. 
If the file does not exist or the current user does not have + * access to read it then an exception will be thrown. The service documentation for the request content is + * as follows 'This be a stream' + * @return Result of the StreamingInputOperation operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.StreamingInputOperation + * @see #streamingInputOperation(StreamingInputOperationRequest, RequestBody) + * @see AWS API Documentation + */ + @Override + public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, + Path sourcePath) throws AwsServiceException, SdkClientException, JsonException { + return streamingInputOperation(streamingInputOperationRequest, RequestBody.fromFile(sourcePath)); } /** @@ -365,8 +719,42 @@ public ReturnT streamingInputOutputOperation( StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, ResponseTransformer responseTransformer) throws AwsServiceException, SdkClientException, JsonException { - return invokeOperation(streamingInputOutputOperationRequest, - request -> delegate.streamingInputOutputOperation(request, requestBody, responseTransformer)); + return delegate.streamingInputOutputOperation(streamingInputOutputOperationRequest, requestBody, responseTransformer); + } + + /** + * Some operation with streaming input and streaming output + * + * @param streamingInputOutputOperationRequest + * @param sourcePath + * {@link Path} to file containing data to send to the service. 
File will be read entirely and may be read + * multiple times in the event of a retry. If the file does not exist or the current user does not have + * access to read it then an exception will be thrown. The service documentation for the request content is + * as follows 'This be a stream' + * @param destinationPath + * {@link Path} to file that response contents will be written to. The file must not exist or this method + * will throw an exception. If the file is not writable by the current user then an exception will be thrown. + * The service documentation for the response content is as follows 'This be a stream'. + * @return The transformed result of the ResponseTransformer. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample JsonClient.StreamingInputOutputOperation + * @see #streamingInputOutputOperation(StreamingInputOutputOperationRequest, RequestBody) + * @see #streamingInputOutputOperation(StreamingInputOutputOperationRequest, ResponseTransformer) + * @see AWS API Documentation + */ + @Override + public StreamingInputOutputOperationResponse streamingInputOutputOperation( + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, Path sourcePath, Path destinationPath) + throws AwsServiceException, SdkClientException, JsonException { + return streamingInputOutputOperation(streamingInputOutputOperationRequest, RequestBody.fromFile(sourcePath), + ResponseTransformer.toFile(destinationPath)); } /** @@ -396,10 +784,92 @@ public ReturnT streamingInputOutputOperation( public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, ResponseTransformer responseTransformer) throws AwsServiceException, SdkClientException, JsonException { - return invokeOperation(streamingOutputOperationRequest, - request -> delegate.streamingOutputOperation(request, responseTransformer)); + return delegate.streamingOutputOperation(streamingOutputOperationRequest, responseTransformer); } + /** + * Some operation with a streaming output + * + * @param streamingOutputOperationRequest + * @param destinationPath + * {@link Path} to file that response contents will be written to. The file must not exist or this method + * will throw an exception. If the file is not writable by the current user then an exception will be thrown. + * The service documentation for the response content is as follows 'This be a stream'. + * @return The transformed result of the ResponseTransformer. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. 
+ * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.StreamingOutputOperation + * @see #streamingOutputOperation(StreamingOutputOperationRequest, ResponseTransformer) + * @see AWS API Documentation + */ + @Override + public StreamingOutputOperationResponse streamingOutputOperation( + StreamingOutputOperationRequest streamingOutputOperationRequest, Path destinationPath) throws AwsServiceException, + SdkClientException, JsonException { + return streamingOutputOperation(streamingOutputOperationRequest, ResponseTransformer.toFile(destinationPath)); + } + + /** + * Some operation with a streaming output + * + * @param streamingOutputOperationRequest + * @return A {@link ResponseInputStream} containing data streamed from service. Note that this is an unmanaged + * reference to the underlying HTTP connection so great care must be taken to ensure all data if fully read + * from the input stream and that it is properly closed. Failure to do so may result in sub-optimal behavior + * and exhausting connections in the connection pool. The unmarshalled response object can be obtained via + * {@link ResponseInputStream#response()}. The service documentation for the response content is as follows + * 'This be a stream'. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample JsonClient.StreamingOutputOperation + * @see #getObject(streamingOutputOperation, ResponseTransformer) + * @see AWS API Documentation + */ + @Override + public ResponseInputStream streamingOutputOperation( + StreamingOutputOperationRequest streamingOutputOperationRequest) throws AwsServiceException, SdkClientException, + JsonException { + return streamingOutputOperation(streamingOutputOperationRequest, ResponseTransformer.toInputStream()); + } + + /** + * Some operation with a streaming output + * + * @param streamingOutputOperationRequest + * @return A {@link ResponseBytes} that loads the data streamed from the service into memory and exposes it in + * convenient in-memory representations like a byte buffer or string. The unmarshalled response object can + * be obtained via {@link ResponseBytes#response()}. The service documentation for the response content is + * as follows 'This be a stream'. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.StreamingOutputOperation + * @see #getObject(streamingOutputOperation, ResponseTransformer) + * @see AWS API Documentation + */ + @Override + public ResponseBytes streamingOutputOperationAsBytes( + StreamingOutputOperationRequest streamingOutputOperationRequest) throws AwsServiceException, SdkClientException, + JsonException { + return streamingOutputOperation(streamingOutputOperationRequest, ResponseTransformer.toBytes()); + } /** * Creates an instance of {@link JsonUtilities} object with the configuration set on this client. 
@@ -418,10 +888,6 @@ public SdkClient delegate() { return this.delegate; } - protected ReturnT invokeOperation(T request, Function operation) { - return operation.apply(request); - } - @Override public final JsonServiceClientConfiguration serviceClientConfiguration() { return delegate.serviceClientConfiguration(); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java index b03bc8eb84d..d7447337ee2 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java @@ -37,8 +37,10 @@ import software.amazon.awssdk.core.client.handler.AttachHttpMetadataResponseHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; @@ -75,6 +77,8 @@ import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.json.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.json.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import 
software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -99,6 +103,7 @@ import software.amazon.awssdk.services.json.transform.InputEventTwoMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.StreamingInputOperationRequestMarshaller; @@ -679,6 +684,68 @@ public CompletableFuture operationWithNoneAut } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, + clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); + return executeFuture; + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java index 4c480ea950e..85ea70c1c3e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java @@ -40,9 +40,11 @@ import software.amazon.awssdk.core.client.handler.AttachHttpMetadataResponseHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; @@ -81,6 +83,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import 
software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -107,6 +111,7 @@ import software.amazon.awssdk.services.json.transform.InputEventMarshaller; import software.amazon.awssdk.services.json.transform.InputEventTwoMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PutOperationWithChecksumRequestMarshaller; @@ -757,6 +762,68 @@ public CompletableFuture operationWithChe } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, + clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); + }); + executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture); + return executeFuture; + } catch (Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java index 3ec88bc9ca0..7beab5fe6ab 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java @@ -34,6 +34,8 @@ import software.amazon.awssdk.services.json.model.InputEventStreamTwo; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -609,6 +611,63 @@ default CompletableFuture operationWithCh .applyMutation(operationWithChecksumRequiredRequest).build()); } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. 
+ * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + throw new UnsupportedOperationException(); + } + + /** + * Invokes the OperationWithRequestCompression operation asynchronously.
+ * <p>
+ * This is a convenience which creates an instance of the {@link OperationWithRequestCompressionRequest.Builder}
+ * avoiding the need to create one manually via {@link OperationWithRequestCompressionRequest#builder()}
+ * </p>
+ * + * @param operationWithRequestCompressionRequest + * A {@link Consumer} that will call methods on {@link OperationWithRequestCompressionRequest.Builder} to + * create a request. + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>JsonException Base class for all service exceptions. Unknown exceptions will be thrown as an instance
+ * of this type.</li>
+ * </ul>
+ * @sample JsonAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default CompletableFuture operationWithRequestCompression( + Consumer operationWithRequestCompressionRequest) { + return operationWithRequestCompression(OperationWithRequestCompressionRequest.builder() + .applyMutation(operationWithRequestCompressionRequest).build()); + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java index 54019ade037..f3b0920472f 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java @@ -18,9 +18,11 @@ import software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -49,6 +51,8 @@ import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import 
software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -67,6 +71,7 @@ import software.amazon.awssdk.services.json.transform.GetOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.json.transform.GetWithoutRequiredMembersRequestMarshaller; import software.amazon.awssdk.services.json.transform.OperationWithChecksumRequiredRequestMarshaller; +import software.amazon.awssdk.services.json.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PaginatedOperationWithoutResultKeyRequestMarshaller; import software.amazon.awssdk.services.json.transform.PutOperationWithChecksumRequestMarshaller; @@ -408,6 +413,59 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( } } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { + JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) + .isPayloadJson(true).build(); + + HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( + operationMetadata, OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, + operationMetadata); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, + clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); + } + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java index 47d774dcfad..e86ed6eee73 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java @@ -27,6 +27,8 @@ import software.amazon.awssdk.services.json.model.JsonException; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest; import software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest; @@ -391,6 +393,57 @@ default OperationWithChecksumRequiredResponse operationWithChecksumRequired( .applyMutation(operationWithChecksumRequiredRequest).build()); } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. 
+ * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, JsonException { + throw new UnsupportedOperationException(); + } + + /** + * Invokes the OperationWithRequestCompression operation.
+ * <p>
+ * This is a convenience which creates an instance of the {@link OperationWithRequestCompressionRequest.Builder}
+ * avoiding the need to create one manually via {@link OperationWithRequestCompressionRequest#builder()}
+ * </p>
+ * + * @param operationWithRequestCompressionRequest + * A {@link Consumer} that will call methods on {@link OperationWithRequestCompressionRequest.Builder} to + * create a request. + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws JsonException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. + * @sample JsonClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + default OperationWithRequestCompressionResponse operationWithRequestCompression( + Consumer operationWithRequestCompressionRequest) + throws AwsServiceException, SdkClientException, JsonException { + return operationWithRequestCompression(OperationWithRequestCompressionRequest.builder() + .applyMutation(operationWithRequestCompressionRequest).build()); + } + /** * Some paginated operation with result_key in paginators.json file * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java index a5e04e6abc2..8b5ad5d8bc6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java @@ -26,9 +26,11 @@ import software.amazon.awssdk.core.client.handler.AsyncClientHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; +import 
software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -52,6 +54,8 @@ import software.amazon.awssdk.services.query.model.OperationWithContextParamResponse; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsRequest; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsResponse; import software.amazon.awssdk.services.query.model.PutOperationWithChecksumRequest; @@ -69,6 +73,7 @@ import software.amazon.awssdk.services.query.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithContextParamRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.query.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithStaticContextParamsRequestMarshaller; import software.amazon.awssdk.services.query.transform.PutOperationWithChecksumRequestMarshaller; import 
software.amazon.awssdk.services.query.transform.StreamingInputOperationRequestMarshaller; @@ -494,6 +499,65 @@ public CompletableFuture operationWithNoneAut } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following
+ * exceptions.
+ * <ul>
+ * <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
+ * Can be used for catch all scenarios.</li>
+ * <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
+ * credentials, etc.</li>
+ * <li>QueryException Base class for all service exceptions. Unknown exceptions will be thrown as an
+ * instance of this type.</li>
+ * </ul>
+ * @sample QueryAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + HttpResponseHandler responseHandler = protocolFactory + .createResponseHandler(OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, + clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + return CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + } catch 
(Throwable t) { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Invokes the OperationWithStaticContextParams operation asynchronously. * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java index d9fdd08fef6..1c4c1af4b10 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java @@ -17,9 +17,11 @@ import software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -45,6 +47,8 @@ import software.amazon.awssdk.services.query.model.OperationWithContextParamResponse; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.query.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.query.model.OperationWithRequestCompressionResponse; import 
software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsRequest; import software.amazon.awssdk.services.query.model.OperationWithStaticContextParamsResponse; import software.amazon.awssdk.services.query.model.PutOperationWithChecksumRequest; @@ -62,6 +66,7 @@ import software.amazon.awssdk.services.query.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithContextParamRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.query.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.query.transform.OperationWithStaticContextParamsRequestMarshaller; import software.amazon.awssdk.services.query.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.query.transform.StreamingInputOperationRequestMarshaller; @@ -422,6 +427,56 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( } } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws QueryException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample QueryClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, QueryException { + + HttpResponseHandler responseHandler = protocolFactory + .createResponseHandler(OperationWithRequestCompressionResponse::builder); + + HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, + clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + /** * Invokes the OperationWithStaticContextParams operation. 
* diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java index c1dc8837dbb..a4745ebebe3 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java @@ -32,9 +32,11 @@ import software.amazon.awssdk.core.client.handler.AsyncClientHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -61,6 +63,8 @@ import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumRequest; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumResponse; import software.amazon.awssdk.services.xml.model.StreamingInputOperationRequest; @@ -76,6 +80,7 @@ import 
software.amazon.awssdk.services.xml.transform.GetOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.xml.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingInputOperationRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingOutputOperationRequestMarshaller; @@ -519,6 +524,64 @@ public CompletableFuture operationWithNoneAut } } + /** + * Invokes the OperationWithRequestCompression operation asynchronously. + * + * @param operationWithRequestCompressionRequest + * @return A Java Future containing the result of the OperationWithRequestCompression operation returned by the + * service.
+ * The CompletableFuture returned by this method can be completed exceptionally with the following + * exceptions. + *
    + *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). + * Can be used for catch all scenarios.
  • + *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get + * credentials, etc.
  • + *
  • XmlException Base class for all service exceptions. Unknown exceptions will be thrown as an instance + * of this type.
  • + *
+ * @sample XmlAsyncClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public CompletableFuture operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + + CompletableFuture executeFuture = clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, + clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) + .withInput(operationWithRequestCompressionRequest)); + CompletableFuture whenCompleteFuture = null; + whenCompleteFuture = executeFuture.whenComplete((r, e) -> { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + }); + CompletableFutureUtils.forwardExceptionTo(whenCompleteFuture, executeFuture); + return whenCompleteFuture; + } catch (Throwable t) { + metricPublishers.forEach(p 
-> p.publish(apiCallMetricCollector.collect())); + return CompletableFutureUtils.failedFuture(t); + } + } + /** * Invokes the PutOperationWithChecksum operation asynchronously. * diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java index 43e33d67c4d..0cc6ae02423 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java @@ -18,9 +18,11 @@ import software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; @@ -45,6 +47,8 @@ import software.amazon.awssdk.services.xml.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeRequest; import software.amazon.awssdk.services.xml.model.OperationWithNoneAuthTypeResponse; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.xml.model.OperationWithRequestCompressionResponse; import software.amazon.awssdk.services.xml.model.PutOperationWithChecksumRequest; import 
software.amazon.awssdk.services.xml.model.PutOperationWithChecksumResponse; import software.amazon.awssdk.services.xml.model.StreamingInputOperationRequest; @@ -59,6 +63,7 @@ import software.amazon.awssdk.services.xml.transform.GetOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithChecksumRequiredRequestMarshaller; import software.amazon.awssdk.services.xml.transform.OperationWithNoneAuthTypeRequestMarshaller; +import software.amazon.awssdk.services.xml.transform.OperationWithRequestCompressionRequestMarshaller; import software.amazon.awssdk.services.xml.transform.PutOperationWithChecksumRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingInputOperationRequestMarshaller; import software.amazon.awssdk.services.xml.transform.StreamingOutputOperationRequestMarshaller; @@ -361,6 +366,54 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( } } + /** + * Invokes the OperationWithRequestCompression operation. + * + * @param operationWithRequestCompressionRequest + * @return Result of the OperationWithRequestCompression operation returned by the service. + * @throws SdkException + * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for + * catch all scenarios. + * @throws SdkClientException + * If any client side error occurs such as an IO related failure, failure to get credentials, etc. + * @throws XmlException + * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
+ * @sample XmlClient.OperationWithRequestCompression + * @see AWS API Documentation + */ + @Override + public OperationWithRequestCompressionResponse operationWithRequestCompression( + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, XmlException { + + HttpResponseHandler> responseHandler = protocolFactory + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + List metricPublishers = resolveMetricPublishers(clientConfiguration, + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector + .create("ApiCall"); + try { + apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); + apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); + + return clientHandler + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withInput(operationWithRequestCompressionRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, + clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + } finally { + metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); + } + } + /** * Invokes the PutOperationWithChecksum operation. 
* diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java index 32804fbd44e..3551f50c52f 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java @@ -141,6 +141,18 @@ public final class ProfileProperty { public static final String EC2_METADATA_SERVICE_ENDPOINT = "ec2_metadata_service_endpoint"; + /** + * Whether request compression is disabled for operations marked with the RequestCompression trait. The default value is + * false, i.e., request compression is enabled. + */ + public static final String DISABLE_REQUEST_COMPRESSION = "disable_request_compression"; + + /** + * The minimum compression size in bytes, inclusive, for a request to be compressed. The default value is 10_240. + * The value must be non-negative and no greater than 10_485_760. + */ + public static final String REQUEST_MIN_COMPRESSION_SIZE_BYTES = "request_min_compression_size_bytes"; + private ProfileProperty() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java new file mode 100644 index 00000000000..e4124024e6f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java @@ -0,0 +1,139 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * Configuration options for operations with the RequestCompression trait to disable request configuration and set the minimum + * compression threshold in bytes. + */ +@SdkPublicApi +public final class RequestCompressionConfiguration implements ToCopyableBuilder { + + private final Boolean requestCompressionEnabled; + private final Integer minimumCompressionThresholdInBytes; + + private RequestCompressionConfiguration(DefaultBuilder builder) { + this.requestCompressionEnabled = builder.requestCompressionEnabled; + this.minimumCompressionThresholdInBytes = builder.minimumCompressionThresholdInBytes; + } + + /** + * If set, returns true if request compression is enabled, else false if request compression is disabled. + */ + public Boolean requestCompressionEnabled() { + return requestCompressionEnabled; + } + + /** + * If set, returns the minimum compression threshold in bytes, inclusive, in order to trigger request compression. + */ + public Integer minimumCompressionThresholdInBytes() { + return minimumCompressionThresholdInBytes; + } + + /** + * Create a {@link RequestCompressionConfiguration.Builder}, used to create a {@link RequestCompressionConfiguration}. 
+ */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + RequestCompressionConfiguration that = (RequestCompressionConfiguration) o; + + if (!requestCompressionEnabled.equals(that.requestCompressionEnabled)) { + return false; + } + return Objects.equals(minimumCompressionThresholdInBytes, that.minimumCompressionThresholdInBytes); + } + + @Override + public int hashCode() { + int result = requestCompressionEnabled != null ? requestCompressionEnabled.hashCode() : 0; + result = 31 * result + (minimumCompressionThresholdInBytes != null ? minimumCompressionThresholdInBytes.hashCode() : 0); + return result; + } + + + public interface Builder extends CopyableBuilder { + + /** + * Configures whether request compression is enabled or not. The default value is true. + * + * @param requestCompressionEnabled + * @return This object for method chaining. + */ + Builder requestCompressionEnabled(Boolean requestCompressionEnabled); + + /** + * Configures the minimum compression threshold, inclusive, in bytes. The default value is 10_240. The value must be + * non-negative and no greater than 10_485_760. + * + * @param minimumCompressionThresholdInBytes + * @return This object for method chaining. 
+ */ + Builder minimumCompressionThresholdInBytes(Integer minimumCompressionThresholdInBytes); + } + + private static final class DefaultBuilder implements Builder { + private Boolean requestCompressionEnabled; + private Integer minimumCompressionThresholdInBytes; + + private DefaultBuilder() { + } + + private DefaultBuilder(RequestCompressionConfiguration requestCompressionConfiguration) { + this.requestCompressionEnabled = requestCompressionConfiguration.requestCompressionEnabled; + this.minimumCompressionThresholdInBytes = requestCompressionConfiguration.minimumCompressionThresholdInBytes; + } + + @Override + public Builder requestCompressionEnabled(Boolean requestCompressionEnabled) { + this.requestCompressionEnabled = requestCompressionEnabled; + return this; + } + + @Override + public Builder minimumCompressionThresholdInBytes(Integer minimumCompressionThresholdInBytes) { + this.minimumCompressionThresholdInBytes = minimumCompressionThresholdInBytes; + return this; + } + + @Override + public RequestCompressionConfiguration build() { + return new RequestCompressionConfiguration(this); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java index cb4daf65922..16a6e76d6fc 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestOverrideConfiguration.java @@ -51,8 +51,8 @@ public abstract class RequestOverrideConfiguration { private final Signer signer; private final List metricPublishers; private final ExecutionAttributes executionAttributes; - private final EndpointProvider endpointProvider; + private final RequestCompressionConfiguration requestCompressionConfiguration; protected RequestOverrideConfiguration(Builder builder) { this.headers = CollectionUtils.deepUnmodifiableMap(builder.headers(), () 
-> new TreeMap<>(String.CASE_INSENSITIVE_ORDER)); @@ -64,6 +64,7 @@ protected RequestOverrideConfiguration(Builder builder) { this.metricPublishers = Collections.unmodifiableList(new ArrayList<>(builder.metricPublishers())); this.executionAttributes = ExecutionAttributes.unmodifiableExecutionAttributes(builder.executionAttributes()); this.endpointProvider = builder.endpointProvider(); + this.requestCompressionConfiguration = builder.requestCompressionConfiguration(); } /** @@ -165,6 +166,15 @@ public Optional endpointProvider() { return Optional.ofNullable(endpointProvider); } + /** + * Returns the request compression configuration object, if present, which includes options to enable/disable request + * compression and set the minimum compression threshold. This request compression config object supersedes the request + * compression config object set on the client. + */ + public Optional requestCompressionConfiguration() { + return Optional.ofNullable(requestCompressionConfiguration); + } + @Override public boolean equals(Object o) { if (this == o) { @@ -182,7 +192,8 @@ public boolean equals(Object o) { Objects.equals(signer, that.signer) && Objects.equals(metricPublishers, that.metricPublishers) && Objects.equals(executionAttributes, that.executionAttributes) && - Objects.equals(endpointProvider, that.endpointProvider); + Objects.equals(endpointProvider, that.endpointProvider) && + Objects.equals(requestCompressionConfiguration, that.requestCompressionConfiguration); } @Override @@ -197,6 +208,7 @@ public int hashCode() { hashCode = 31 * hashCode + Objects.hashCode(metricPublishers); hashCode = 31 * hashCode + Objects.hashCode(executionAttributes); hashCode = 31 * hashCode + Objects.hashCode(endpointProvider); + hashCode = 31 * hashCode + Objects.hashCode(requestCompressionConfiguration); return hashCode; } @@ -438,6 +450,28 @@ default B putRawQueryParameter(String name, String value) { EndpointProvider endpointProvider(); + /** + * Sets the {@link 
RequestCompressionConfiguration} for this request. The order of precedence, from highest to lowest, + * for this setting is: 1) Per request configuration 2) Client configuration 3) Environment variables 4) Profile setting. + * + * @param requestCompressionConfiguration Request compression configuration object for this request. + */ + B requestCompressionConfiguration(RequestCompressionConfiguration requestCompressionConfiguration); + + /** + * Sets the {@link RequestCompressionConfiguration} for this request. The order of precedence, from highest to lowest, + * for this setting is: 1) Per request configuration 2) Client configuration 3) Environment variables 4) Profile setting. + * + * @param requestCompressionConfigurationConsumer A {@link Consumer} that accepts a + * {@link RequestCompressionConfiguration.Builder} + * + * @return This object for method chaining + */ + B requestCompressionConfiguration(Consumer + requestCompressionConfigurationConsumer); + + RequestCompressionConfiguration requestCompressionConfiguration(); + /** * Create a new {@code SdkRequestOverrideConfiguration} with the properties set on this builder. 
* @@ -455,9 +489,8 @@ protected abstract static class BuilderImpl implements Builde private Signer signer; private List metricPublishers = new ArrayList<>(); private ExecutionAttributes.Builder executionAttributesBuilder = ExecutionAttributes.builder(); - private EndpointProvider endpointProvider; - + private RequestCompressionConfiguration requestCompressionConfiguration; protected BuilderImpl() { } @@ -472,6 +505,7 @@ protected BuilderImpl(RequestOverrideConfiguration sdkRequestOverrideConfig) { metricPublishers(sdkRequestOverrideConfig.metricPublishers()); executionAttributes(sdkRequestOverrideConfig.executionAttributes()); endpointProvider(sdkRequestOverrideConfig.endpointProvider); + requestCompressionConfiguration(sdkRequestOverrideConfig.requestCompressionConfiguration); } @Override @@ -626,7 +660,6 @@ public void setExecutionAttributes(ExecutionAttributes executionAttributes) { executionAttributes(executionAttributes); } - @Override public B endpointProvider(EndpointProvider endpointProvider) { this.endpointProvider = endpointProvider; @@ -641,5 +674,25 @@ public void setEndpointProvider(EndpointProvider endpointProvider) { public EndpointProvider endpointProvider() { return endpointProvider; } + + @Override + public B requestCompressionConfiguration(RequestCompressionConfiguration requestCompressionConfiguration) { + this.requestCompressionConfiguration = requestCompressionConfiguration; + return (B) this; + } + + @Override + public B requestCompressionConfiguration(Consumer + requestCompressionConfigurationConsumer) { + RequestCompressionConfiguration.Builder b = RequestCompressionConfiguration.builder(); + requestCompressionConfigurationConsumer.accept(b); + requestCompressionConfiguration(b.build()); + return (B) this; + } + + @Override + public RequestCompressionConfiguration requestCompressionConfiguration() { + return requestCompressionConfiguration; + } } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java index 1e5c400ca61..f04029a3b0f 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java @@ -184,6 +184,18 @@ public enum SdkSystemSetting implements SystemSetting { */ AWS_USE_FIPS_ENDPOINT("aws.useFipsEndpoint", null), + /** + * Whether request compression is disabled for operations marked with the RequestCompression trait. The default value is + * false, i.e., request compression is enabled. + */ + AWS_DISABLE_REQUEST_COMPRESSION("aws.disableRequestCompression", null), + + /** + * Defines the minimum compression size in bytes, inclusive, for a request to be compressed. The default value is 10_240. + * The value must be non-negative and no greater than 10_485_760. + */ + AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES("aws.requestMinCompressionSizeBytes", null), + ; private final String systemProperty; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 898cfbbd4ea..596caf82a0a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -38,6 +38,7 @@ import static software.amazon.awssdk.core.client.config.SdkClientOption.PROFILE_FILE; import static software.amazon.awssdk.core.client.config.SdkClientOption.PROFILE_FILE_SUPPLIER; import static software.amazon.awssdk.core.client.config.SdkClientOption.PROFILE_NAME; +import static software.amazon.awssdk.core.client.config.SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION; import static software.amazon.awssdk.core.client.config.SdkClientOption.RETRY_POLICY; import static 
software.amazon.awssdk.core.client.config.SdkClientOption.SCHEDULED_EXECUTOR_SERVICE; import static software.amazon.awssdk.core.client.config.SdkClientOption.SIGNER_OVERRIDDEN; @@ -237,6 +238,8 @@ private SdkClientConfiguration setOverrides(SdkClientConfiguration configuration builder.option(METRIC_PUBLISHERS, clientOverrideConfiguration.metricPublishers()); builder.option(EXECUTION_ATTRIBUTES, clientOverrideConfiguration.executionAttributes()); builder.option(TOKEN_SIGNER, clientOverrideConfiguration.advancedOption(TOKEN_SIGNER).orElse(null)); + builder.option(REQUEST_COMPRESSION_CONFIGURATION, + clientOverrideConfiguration.requestCompressionConfiguration().orElse(null)); clientOverrideConfiguration.advancedOption(ENDPOINT_OVERRIDDEN_OVERRIDE).ifPresent(value -> { builder.option(ENDPOINT_OVERRIDDEN, value); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java index 83cf2317038..a0c9ff18c6d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/ClientOverrideConfiguration.java @@ -27,6 +27,7 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.annotations.ToBuilderIgnoreField; +import software.amazon.awssdk.core.RequestCompressionConfiguration; import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.interceptor.ExecutionAttribute; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; @@ -64,6 +65,7 @@ public final class ClientOverrideConfiguration private final List metricPublishers; private final ExecutionAttributes executionAttributes; private final ScheduledExecutorService scheduledExecutorService; + private final 
RequestCompressionConfiguration requestCompressionConfiguration; /** * Initialize this configuration. Private to require use of {@link #builder()}. @@ -80,6 +82,7 @@ private ClientOverrideConfiguration(Builder builder) { this.metricPublishers = Collections.unmodifiableList(new ArrayList<>(builder.metricPublishers())); this.executionAttributes = ExecutionAttributes.unmodifiableExecutionAttributes(builder.executionAttributes()); this.scheduledExecutorService = builder.scheduledExecutorService(); + this.requestCompressionConfiguration = builder.requestCompressionConfiguration(); } @Override @@ -96,7 +99,8 @@ public Builder toBuilder() { .defaultProfileName(defaultProfileName) .executionAttributes(executionAttributes) .metricPublishers(metricPublishers) - .scheduledExecutorService(scheduledExecutorService); + .scheduledExecutorService(scheduledExecutorService) + .requestCompressionConfiguration(requestCompressionConfiguration); } /** @@ -230,19 +234,30 @@ public ExecutionAttributes executionAttributes() { return executionAttributes; } + /** + * The request compression configuration object, which includes options to enable/disable request compression and set the + * minimum compression threshold. 
+ * + * @see Builder#requestCompressionConfiguration(RequestCompressionConfiguration) + */ + public Optional requestCompressionConfiguration() { + return Optional.ofNullable(requestCompressionConfiguration); + } + @Override public String toString() { return ToString.builder("ClientOverrideConfiguration") - .add("headers", headers) - .add("retryPolicy", retryPolicy) - .add("apiCallTimeout", apiCallTimeout) - .add("apiCallAttemptTimeout", apiCallAttemptTimeout) - .add("executionInterceptors", executionInterceptors) - .add("advancedOptions", advancedOptions) - .add("profileFile", defaultProfileFile) - .add("profileName", defaultProfileName) - .add("scheduledExecutorService", scheduledExecutorService) - .build(); + .add("headers", headers) + .add("retryPolicy", retryPolicy) + .add("apiCallTimeout", apiCallTimeout) + .add("apiCallAttemptTimeout", apiCallAttemptTimeout) + .add("executionInterceptors", executionInterceptors) + .add("advancedOptions", advancedOptions) + .add("profileFile", defaultProfileFile) + .add("profileName", defaultProfileName) + .add("scheduledExecutorService", scheduledExecutorService) + .add("requestCompressionConfiguration", requestCompressionConfiguration) + .build(); } /** @@ -513,6 +528,23 @@ default Builder retryPolicy(RetryMode retryMode) { Builder putExecutionAttribute(ExecutionAttribute attribute, T value); ExecutionAttributes executionAttributes(); + + /** + * Sets the {@link RequestCompressionConfiguration} for this client. + */ + Builder requestCompressionConfiguration(RequestCompressionConfiguration requestCompressionConfiguration); + + /** + * Sets the {@link RequestCompressionConfiguration} for this client. 
+ */ + default Builder requestCompressionConfiguration(Consumer + requestCompressionConfiguration) { + return requestCompressionConfiguration(RequestCompressionConfiguration.builder() + .applyMutation(requestCompressionConfiguration) + .build()); + } + + RequestCompressionConfiguration requestCompressionConfiguration(); } /** @@ -530,6 +562,7 @@ private static final class DefaultClientOverrideConfigurationBuilder implements private List metricPublishers = new ArrayList<>(); private ExecutionAttributes.Builder executionAttributes = ExecutionAttributes.builder(); private ScheduledExecutorService scheduledExecutorService; + private RequestCompressionConfiguration requestCompressionConfiguration; @Override public Builder headers(Map> headers) { @@ -724,6 +757,21 @@ public ExecutionAttributes executionAttributes() { return executionAttributes.build(); } + @Override + public Builder requestCompressionConfiguration(RequestCompressionConfiguration requestCompressionConfiguration) { + this.requestCompressionConfiguration = requestCompressionConfiguration; + return this; + } + + public void setRequestCompressionEnabled(RequestCompressionConfiguration requestCompressionConfiguration) { + requestCompressionConfiguration(requestCompressionConfiguration); + } + + @Override + public RequestCompressionConfiguration requestCompressionConfiguration() { + return requestCompressionConfiguration; + } + @Override public ClientOverrideConfiguration build() { return new ClientOverrideConfiguration(this); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java index 07361d75f23..4ae8e49dcc9 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java @@ -23,6 +23,7 @@ import java.util.function.Supplier; import 
software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.ClientType; +import software.amazon.awssdk.core.RequestCompressionConfiguration; import software.amazon.awssdk.core.ServiceConfiguration; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; @@ -190,6 +191,12 @@ public final class SdkClientOption extends ClientOption { public static final SdkClientOption CLIENT_CONTEXT_PARAMS = new SdkClientOption<>(AttributeMap.class); + /** + * Option to specify the request compression configuration settings. + */ + public static final SdkClientOption REQUEST_COMPRESSION_CONFIGURATION = + new SdkClientOption<>(RequestCompressionConfiguration.class); + private SdkClientOption(Class valueClass) { super(valueClass); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java index 6e71448dc98..8dd6c86b8dc 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java @@ -19,6 +19,7 @@ import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.ClientType; +import software.amazon.awssdk.core.RequestCompressionConfiguration; import software.amazon.awssdk.core.ServiceConfiguration; import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.ChecksumSpecs; @@ -109,6 +110,12 @@ public class SdkExecutionAttribute { public static final ExecutionAttribute HTTP_RESPONSE_CHECKSUM_VALIDATION = new ExecutionAttribute<>( "HttpResponseChecksumValidation"); + /** + * The {@link RequestCompressionConfiguration}, which includes options to enable/disable request compression and set the + * 
minimum compression threshold. + */ + public static final ExecutionAttribute REQUEST_COMPRESSION_CONFIGURATION = + new ExecutionAttribute<>("RequestCompressionConfiguration"); protected SdkExecutionAttribute() { } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java index 3080d0fd47b..f0d51c8cb32 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java @@ -18,6 +18,7 @@ import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; import software.amazon.awssdk.endpoints.Endpoint; import software.amazon.awssdk.endpoints.EndpointProvider; import software.amazon.awssdk.http.SdkHttpExecutionAttributes; @@ -92,6 +93,12 @@ public final class SdkInternalExecutionAttribute extends SdkExecutionAttribute { public static final ExecutionAttribute IS_DISCOVERED_ENDPOINT = new ExecutionAttribute<>("IsDiscoveredEndpoint"); + /** + * The supported compression algorithms for an operation, and whether the operation is streaming or not. 
+ */ + public static final ExecutionAttribute REQUEST_COMPRESSION = + new ExecutionAttribute<>("RequestCompression"); + private SdkInternalExecutionAttribute() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/RequestCompression.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/RequestCompression.java new file mode 100644 index 00000000000..ecb452aed30 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/RequestCompression.java @@ -0,0 +1,93 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.interceptor.trait; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public final class RequestCompression { + + private List encodings; + private boolean isStreaming; + + private RequestCompression(Builder builder) { + this.encodings = builder.encodings; + this.isStreaming = builder.isStreaming; + } + + public List getEncodings() { + return encodings; + } + + public boolean isStreaming() { + return isStreaming; + } + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + + private List encodings; + private boolean isStreaming; + + public Builder encodings(List encodings) { + this.encodings = encodings; + return this; + } + + public Builder encodings(String... encodings) { + if (encodings != null) { + this.encodings = Arrays.asList(encodings); + } + return this; + } + + public Builder isStreaming(boolean isStreaming) { + this.isStreaming = isStreaming; + return this; + } + + public RequestCompression build() { + return new RequestCompression(this); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RequestCompression that = (RequestCompression) o; + return isStreaming == that.isStreaming() + && Objects.equals(encodings, that.getEncodings()); + } + + @Override + public int hashCode() { + int hashCode = 1; + hashCode = 31 * hashCode + (isStreaming ? 
1 : 0); + hashCode = 31 * hashCode + Objects.hashCode(encodings); + return hashCode; + } +} From 0b1d4cad455109024e421ba6212c50aff2620837 Mon Sep 17 00:00:00 2001 From: hdavidh Date: Mon, 17 Jul 2023 13:05:38 -0700 Subject: [PATCH 02/17] Update code generated test classes --- .../test-abstract-async-client-class.java | 5 +- .../test-abstract-sync-client-class.java | 491 +----------------- 2 files changed, 28 insertions(+), 468 deletions(-) diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java index 01c82c06b48..05c47601846 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-async-client-class.java @@ -330,7 +330,8 @@ public CompletableFuture operationWithChe @Override public CompletableFuture operationWithRequestCompression( OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { - return delegate.operationWithRequestCompression(operationWithRequestCompressionRequest); + return invokeOperation(operationWithRequestCompressionRequest, + request -> delegate.operationWithRequestCompression(request)); } /** @@ -496,7 +497,7 @@ public CompletableFuture streamingInputOutputOperation( StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, AsyncRequestBody requestBody, AsyncResponseTransformer asyncResponseTransformer) { return invokeOperation(streamingInputOutputOperationRequest, - request -> delegate.streamingInputOutputOperation(request, requestBody, asyncResponseTransformer)); + request -> delegate.streamingInputOutputOperation(request, requestBody, asyncResponseTransformer)); } /** diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java index c6a6ccd1328..8fc5e6c0adc 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-abstract-sync-client-class.java @@ -1,11 +1,9 @@ package software.amazon.awssdk.services.json; -import java.nio.file.Path; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.awscore.exception.AwsServiceException; -import software.amazon.awssdk.core.ResponseBytes; -import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.SdkClient; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.sync.RequestBody; @@ -22,6 +20,7 @@ import software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersResponse; import software.amazon.awssdk.services.json.model.InvalidInputException; import software.amazon.awssdk.services.json.model.JsonException; +import software.amazon.awssdk.services.json.model.JsonRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest; import software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredResponse; import software.amazon.awssdk.services.json.model.OperationWithRequestCompressionRequest; @@ -38,8 +37,6 @@ import software.amazon.awssdk.services.json.model.StreamingInputOutputOperationResponse; import software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest; import software.amazon.awssdk.services.json.model.StreamingOutputOperationResponse; -import 
software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable; -import software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable; import software.amazon.awssdk.utils.Validate; @Generated("software.amazon.awssdk:codegen") @@ -71,14 +68,11 @@ public DelegatingJsonClient(JsonClient delegate) { * @sample JsonClient.APostOperation * @see AWS * API Documentation - * - * @deprecated This API is deprecated, use something else */ @Override - @Deprecated public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, AwsServiceException, SdkClientException, JsonException { - return delegate.aPostOperation(aPostOperationRequest); + return invokeOperation(aPostOperationRequest, request -> delegate.aPostOperation(request)); } /** @@ -105,7 +99,7 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio public APostOperationWithOutputResponse aPostOperationWithOutput( APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, SdkClientException, JsonException { - return delegate.aPostOperationWithOutput(aPostOperationWithOutputRequest); + return invokeOperation(aPostOperationWithOutputRequest, request -> delegate.aPostOperationWithOutput(request)); } /** @@ -127,7 +121,7 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) throws AwsServiceException, SdkClientException, JsonException { - return delegate.bearerAuthOperation(bearerAuthOperationRequest); + return invokeOperation(bearerAuthOperationRequest, request -> delegate.bearerAuthOperation(request)); } /** @@ -150,7 +144,7 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques public GetOperationWithChecksumResponse getOperationWithChecksum( 
GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, JsonException { - return delegate.getOperationWithChecksum(getOperationWithChecksumRequest); + return invokeOperation(getOperationWithChecksumRequest, request -> delegate.getOperationWithChecksum(request)); } /** @@ -177,7 +171,7 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, SdkClientException, JsonException { - return delegate.getWithoutRequiredMembers(getWithoutRequiredMembersRequest); + return invokeOperation(getWithoutRequiredMembersRequest, request -> delegate.getWithoutRequiredMembers(request)); } /** @@ -200,7 +194,7 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( public OperationWithChecksumRequiredResponse operationWithChecksumRequired( OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, SdkClientException, JsonException { - return delegate.operationWithChecksumRequired(operationWithChecksumRequiredRequest); + return invokeOperation(operationWithChecksumRequiredRequest, request -> delegate.operationWithChecksumRequired(request)); } /** @@ -223,29 +217,8 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( public OperationWithRequestCompressionResponse operationWithRequestCompression( OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, SdkClientException, JsonException { - return delegate.operationWithRequestCompression(operationWithRequestCompressionRequest); - } - - /** - * Some paginated operation with result_key in paginators.json file - * - * @return Result of the PaginatedOperationWithResultKey operation returned by the service. 
- * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. - * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. - * @sample JsonClient.PaginatedOperationWithResultKey - * @see #paginatedOperationWithResultKey(PaginatedOperationWithResultKeyRequest) - * @see AWS API Documentation - */ - @Override - public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey() throws AwsServiceException, - SdkClientException, JsonException { - return paginatedOperationWithResultKey(PaginatedOperationWithResultKeyRequest.builder().build()); + return invokeOperation(operationWithRequestCompressionRequest, + request -> delegate.operationWithRequestCompression(request)); } /** @@ -268,162 +241,8 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey() public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, SdkClientException, JsonException { - return delegate.paginatedOperationWithResultKey(paginatedOperationWithResultKeyRequest); - } - - /** - * Some paginated operation with result_key in paginators.json file
- *

- * This is a variant of - * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} - * operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will - * internally handle making service calls for you. - *

- *

- * When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no - * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response - * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your - * request, you will see the failures only after you start iterating through the iterable. - *

- * - *

- * The following are few ways to iterate through the response pages: - *

- * 1) Using a Stream - * - *
-     * {@code
-     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
-     * responses.stream().forEach(....);
-     * }
-     * 
- * - * 2) Using For loop - * - *
-     * {
-     *     @code
-     *     software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client
-     *             .paginatedOperationWithResultKeyPaginator(request);
-     *     for (software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse response : responses) {
-     *         // do something;
-     *     }
-     * }
-     * 
- * - * 3) Use iterator directly - * - *
-     * {@code
-     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
-     * responses.iterator().forEachRemaining(....);
-     * }
-     * 
- *

- * Please notice that the configuration of MaxResults won't limit the number of results you get with the - * paginator. It only limits the number of results in each page. - *

- *

- * Note: If you prefer to have control on service calls, use the - * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} - * operation. - *

- * - * @return A custom iterable that can be used to iterate through all the response pages. - * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. - * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. - * @sample JsonClient.PaginatedOperationWithResultKey - * @see #paginatedOperationWithResultKeyPaginator(PaginatedOperationWithResultKeyRequest) - * @see AWS API Documentation - */ - @Override - public PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPaginator() throws AwsServiceException, - SdkClientException, JsonException { - return paginatedOperationWithResultKeyPaginator(PaginatedOperationWithResultKeyRequest.builder().build()); - } - - /** - * Some paginated operation with result_key in paginators.json file
- *

- * This is a variant of - * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} - * operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will - * internally handle making service calls for you. - *

- *

- * When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no - * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response - * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your - * request, you will see the failures only after you start iterating through the iterable. - *

- * - *

- * The following are few ways to iterate through the response pages: - *

- * 1) Using a Stream - * - *
-     * {@code
-     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
-     * responses.stream().forEach(....);
-     * }
-     * 
- * - * 2) Using For loop - * - *
-     * {
-     *     @code
-     *     software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client
-     *             .paginatedOperationWithResultKeyPaginator(request);
-     *     for (software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyResponse response : responses) {
-     *         // do something;
-     *     }
-     * }
-     * 
- * - * 3) Use iterator directly - * - *
-     * {@code
-     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithResultKeyIterable responses = client.paginatedOperationWithResultKeyPaginator(request);
-     * responses.iterator().forEachRemaining(....);
-     * }
-     * 
- *

- * Please notice that the configuration of MaxResults won't limit the number of results you get with the - * paginator. It only limits the number of results in each page. - *

- *

- * Note: If you prefer to have control on service calls, use the - * {@link #paginatedOperationWithResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest)} - * operation. - *

- * - * @param paginatedOperationWithResultKeyRequest - * @return A custom iterable that can be used to iterate through all the response pages. - * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. - * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. - * @sample JsonClient.PaginatedOperationWithResultKey - * @see AWS API Documentation - */ - @Override - public PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPaginator( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { - return delegate.paginatedOperationWithResultKeyPaginator(paginatedOperationWithResultKeyRequest); + return invokeOperation(paginatedOperationWithResultKeyRequest, + request -> delegate.paginatedOperationWithResultKey(request)); } /** @@ -446,85 +265,8 @@ public PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyPa public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, SdkClientException, JsonException { - return delegate.paginatedOperationWithoutResultKey(paginatedOperationWithoutResultKeyRequest); - } - - /** - * Some paginated operation without result_key in paginators.json file
- *

- * This is a variant of - * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} - * operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will - * internally handle making service calls for you. - *

- *

- * When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no - * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response - * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your - * request, you will see the failures only after you start iterating through the iterable. - *

- * - *

- * The following are few ways to iterate through the response pages: - *

- * 1) Using a Stream - * - *
-     * {@code
-     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable responses = client.paginatedOperationWithoutResultKeyPaginator(request);
-     * responses.stream().forEach(....);
-     * }
-     * 
- * - * 2) Using For loop - * - *
-     * {
-     *     @code
-     *     software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable responses = client
-     *             .paginatedOperationWithoutResultKeyPaginator(request);
-     *     for (software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyResponse response : responses) {
-     *         // do something;
-     *     }
-     * }
-     * 
- * - * 3) Use iterator directly - * - *
-     * {@code
-     * software.amazon.awssdk.services.json.paginators.PaginatedOperationWithoutResultKeyIterable responses = client.paginatedOperationWithoutResultKeyPaginator(request);
-     * responses.iterator().forEachRemaining(....);
-     * }
-     * 
- *

- * Please notice that the configuration of MaxResults won't limit the number of results you get with the - * paginator. It only limits the number of results in each page. - *

- *

- * Note: If you prefer to have control on service calls, use the - * {@link #paginatedOperationWithoutResultKey(software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest)} - * operation. - *

- * - * @param paginatedOperationWithoutResultKeyRequest - * @return A custom iterable that can be used to iterate through all the response pages. - * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. - * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. - * @sample JsonClient.PaginatedOperationWithoutResultKey - * @see AWS API Documentation - */ - @Override - public PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResultKeyPaginator( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { - return delegate.paginatedOperationWithoutResultKeyPaginator(paginatedOperationWithoutResultKeyRequest); + return invokeOperation(paginatedOperationWithoutResultKeyRequest, + request -> delegate.paginatedOperationWithoutResultKey(request)); } /** @@ -573,50 +315,8 @@ public PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResul public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, RequestBody requestBody, ResponseTransformer responseTransformer) throws AwsServiceException, SdkClientException, JsonException { - return delegate.putOperationWithChecksum(putOperationWithChecksumRequest, requestBody, responseTransformer); - } - - /** - * Invokes the PutOperationWithChecksum operation. - * - * @param putOperationWithChecksumRequest - * @param sourcePath - * {@link Path} to file containing data to send to the service. File will be read entirely and may be read - * multiple times in the event of a retry. 
If the file does not exist or the current user does not have - * access to read it then an exception will be thrown. The service documentation for the request content is - * as follows ' - *

- * Object data. - *

- * ' - * @param destinationPath - * {@link Path} to file that response contents will be written to. The file must not exist or this method - * will throw an exception. If the file is not writable by the current user then an exception will be thrown. - * The service documentation for the response content is as follows ' - *

- * Object data. - *

- * '. - * @return The transformed result of the ResponseTransformer. - * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. - * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. - * @sample JsonClient.PutOperationWithChecksum - * @see #putOperationWithChecksum(PutOperationWithChecksumRequest, RequestBody) - * @see #putOperationWithChecksum(PutOperationWithChecksumRequest, ResponseTransformer) - * @see AWS API Documentation - */ - @Override - public PutOperationWithChecksumResponse putOperationWithChecksum( - PutOperationWithChecksumRequest putOperationWithChecksumRequest, Path sourcePath, Path destinationPath) - throws AwsServiceException, SdkClientException, JsonException { - return putOperationWithChecksum(putOperationWithChecksumRequest, RequestBody.fromFile(sourcePath), - ResponseTransformer.toFile(destinationPath)); + return invokeOperation(putOperationWithChecksumRequest, + request -> delegate.putOperationWithChecksum(request, requestBody, responseTransformer)); } /** @@ -649,35 +349,7 @@ public PutOperationWithChecksumResponse putOperationWithChecksum( @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { - return delegate.streamingInputOperation(streamingInputOperationRequest, requestBody); - } - - /** - * Some operation with a streaming input - * - * @param streamingInputOperationRequest - * @param sourcePath - * {@link Path} to file containing data to send to the service. File will be read entirely and may be read - * multiple times in the event of a retry. 
If the file does not exist or the current user does not have - * access to read it then an exception will be thrown. The service documentation for the request content is - * as follows 'This be a stream' - * @return Result of the StreamingInputOperation operation returned by the service. - * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. - * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. - * @sample JsonClient.StreamingInputOperation - * @see #streamingInputOperation(StreamingInputOperationRequest, RequestBody) - * @see AWS API Documentation - */ - @Override - public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - Path sourcePath) throws AwsServiceException, SdkClientException, JsonException { - return streamingInputOperation(streamingInputOperationRequest, RequestBody.fromFile(sourcePath)); + return invokeOperation(streamingInputOperationRequest, request -> delegate.streamingInputOperation(request, requestBody)); } /** @@ -719,42 +391,8 @@ public ReturnT streamingInputOutputOperation( StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, ResponseTransformer responseTransformer) throws AwsServiceException, SdkClientException, JsonException { - return delegate.streamingInputOutputOperation(streamingInputOutputOperationRequest, requestBody, responseTransformer); - } - - /** - * Some operation with streaming input and streaming output - * - * @param streamingInputOutputOperationRequest - * @param sourcePath - * {@link Path} to file containing data to send to the service. 
File will be read entirely and may be read - * multiple times in the event of a retry. If the file does not exist or the current user does not have - * access to read it then an exception will be thrown. The service documentation for the request content is - * as follows 'This be a stream' - * @param destinationPath - * {@link Path} to file that response contents will be written to. The file must not exist or this method - * will throw an exception. If the file is not writable by the current user then an exception will be thrown. - * The service documentation for the response content is as follows 'This be a stream'. - * @return The transformed result of the ResponseTransformer. - * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. - * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
- * @sample JsonClient.StreamingInputOutputOperation - * @see #streamingInputOutputOperation(StreamingInputOutputOperationRequest, RequestBody) - * @see #streamingInputOutputOperation(StreamingInputOutputOperationRequest, ResponseTransformer) - * @see AWS API Documentation - */ - @Override - public StreamingInputOutputOperationResponse streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, Path sourcePath, Path destinationPath) - throws AwsServiceException, SdkClientException, JsonException { - return streamingInputOutputOperation(streamingInputOutputOperationRequest, RequestBody.fromFile(sourcePath), - ResponseTransformer.toFile(destinationPath)); + return invokeOperation(streamingInputOutputOperationRequest, + request -> delegate.streamingInputOutputOperation(request, requestBody, responseTransformer)); } /** @@ -784,91 +422,8 @@ public StreamingInputOutputOperationResponse streamingInputOutputOperation( public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, ResponseTransformer responseTransformer) throws AwsServiceException, SdkClientException, JsonException { - return delegate.streamingOutputOperation(streamingOutputOperationRequest, responseTransformer); - } - - /** - * Some operation with a streaming output - * - * @param streamingOutputOperationRequest - * @param destinationPath - * {@link Path} to file that response contents will be written to. The file must not exist or this method - * will throw an exception. If the file is not writable by the current user then an exception will be thrown. - * The service documentation for the response content is as follows 'This be a stream'. - * @return The transformed result of the ResponseTransformer. - * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. 
- * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. - * @sample JsonClient.StreamingOutputOperation - * @see #streamingOutputOperation(StreamingOutputOperationRequest, ResponseTransformer) - * @see AWS API Documentation - */ - @Override - public StreamingOutputOperationResponse streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, Path destinationPath) throws AwsServiceException, - SdkClientException, JsonException { - return streamingOutputOperation(streamingOutputOperationRequest, ResponseTransformer.toFile(destinationPath)); - } - - /** - * Some operation with a streaming output - * - * @param streamingOutputOperationRequest - * @return A {@link ResponseInputStream} containing data streamed from service. Note that this is an unmanaged - * reference to the underlying HTTP connection so great care must be taken to ensure all data if fully read - * from the input stream and that it is properly closed. Failure to do so may result in sub-optimal behavior - * and exhausting connections in the connection pool. The unmarshalled response object can be obtained via - * {@link ResponseInputStream#response()}. The service documentation for the response content is as follows - * 'This be a stream'. - * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. - * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
- * @sample JsonClient.StreamingOutputOperation - * @see #getObject(streamingOutputOperation, ResponseTransformer) - * @see AWS API Documentation - */ - @Override - public ResponseInputStream streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest) throws AwsServiceException, SdkClientException, - JsonException { - return streamingOutputOperation(streamingOutputOperationRequest, ResponseTransformer.toInputStream()); - } - - /** - * Some operation with a streaming output - * - * @param streamingOutputOperationRequest - * @return A {@link ResponseBytes} that loads the data streamed from the service into memory and exposes it in - * convenient in-memory representations like a byte buffer or string. The unmarshalled response object can - * be obtained via {@link ResponseBytes#response()}. The service documentation for the response content is - * as follows 'This be a stream'. - * @throws SdkException - * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for - * catch all scenarios. - * @throws SdkClientException - * If any client side error occurs such as an IO related failure, failure to get credentials, etc. - * @throws JsonException - * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. 
- * @sample JsonClient.StreamingOutputOperation - * @see #getObject(streamingOutputOperation, ResponseTransformer) - * @see AWS API Documentation - */ - @Override - public ResponseBytes streamingOutputOperationAsBytes( - StreamingOutputOperationRequest streamingOutputOperationRequest) throws AwsServiceException, SdkClientException, - JsonException { - return streamingOutputOperation(streamingOutputOperationRequest, ResponseTransformer.toBytes()); + return invokeOperation(streamingOutputOperationRequest, + request -> delegate.streamingOutputOperation(request, responseTransformer)); } /** @@ -888,6 +443,10 @@ public SdkClient delegate() { return this.delegate; } + protected ReturnT invokeOperation(T request, Function operation) { + return operation.apply(request); + } + @Override public final JsonServiceClientConfiguration serviceClientConfiguration() { return delegate.serviceClientConfiguration(); From 0901d5857ff12d1e0ba1a6db81931e9350e2cfea Mon Sep 17 00:00:00 2001 From: hdavidh Date: Tue, 4 Jul 2023 23:16:36 -0700 Subject: [PATCH 03/17] Request compression - nonstreaming operations --- .../awssdk/core/compression/Compressor.java | 74 ++++++ .../core/compression/CompressorType.java | 116 +++++++++ .../internal/compression/GzipCompressor.java | 56 +++++ .../internal/http/AmazonAsyncHttpClient.java | 2 + .../internal/http/AmazonSyncHttpClient.java | 2 + .../pipeline/stages/CompressRequestStage.java | 221 ++++++++++++++++++ .../RequestCompressionConfigurationTest.java | 43 ++++ .../core/compression/CompressorTypeTest.java | 47 ++++ .../compression/GzipCompressorTest.java | 57 +++++ .../cloudwatch/CloudWatchIntegrationTest.java | 86 ++++++- .../customresponsemetadata/service-2.json | 23 ++ .../services/RequestCompressionTest.java | 179 ++++++++++++++ 12 files changed, 905 insertions(+), 1 deletion(-) create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/compression/Compressor.java create mode 100644 
core/sdk-core/src/main/java/software/amazon/awssdk/core/compression/CompressorType.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/RequestCompressionConfigurationTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/compression/Compressor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/compression/Compressor.java new file mode 100644 index 00000000000..f74400960b5 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/compression/Compressor.java @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.compression; + +import java.io.InputStream; +import java.nio.ByteBuffer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.internal.http.pipeline.stages.CompressRequestStage; + +/** + * Interface for compressors used by {@link CompressRequestStage} to compress requests. + */ +@SdkInternalApi +public interface Compressor { + + /** + * The compression algorithm type. + * + * @return The {@link String} compression algorithm type. + */ + String compressorType(); + + /** + * Compress a {@link SdkBytes} payload. + * + * @param content + * @return The compressed {@link SdkBytes}. + */ + SdkBytes compress(SdkBytes content); + + /** + * Compress a byte[] payload. + * + * @param content + * @return The compressed byte array. + */ + default byte[] compress(byte[] content) { + return compress(SdkBytes.fromByteArray(content)).asByteArray(); + } + + /** + * Compress an {@link InputStream} payload. + * + * @param content + * @return The compressed {@link InputStream}. + */ + default InputStream compress(InputStream content) { + return compress(SdkBytes.fromInputStream(content)).asInputStream(); + } + + /** + * Compress an {@link ByteBuffer} payload. + * + * @param content + * @return The compressed {@link ByteBuffer}. + */ + default ByteBuffer compress(ByteBuffer content) { + return compress(SdkBytes.fromByteBuffer(content)).asByteBuffer(); + } +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/compression/CompressorType.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/compression/CompressorType.java new file mode 100644 index 00000000000..4e403694fff --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/compression/CompressorType.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.compression; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.utils.Validate; + +/** + * The supported compression algorithms for operations with the requestCompression trait. Each supported algorithm will have an + * {@link Compressor} implementation. + */ +@SdkInternalApi +public final class CompressorType { + + public static final CompressorType GZIP = CompressorType.of("gzip"); + + private static Map compressorMap = new HashMap() {{ + put("gzip", new GzipCompressor()); + }}; + + private final String id; + + private CompressorType(String id) { + this.id = id; + } + + /** + * Creates a new {@link CompressorType} of the given value. + */ + public static CompressorType of(String value) { + Validate.paramNotBlank(value, "compressionType"); + return CompressorTypeCache.put(value); + } + + /** + * Returns the {@link Set} of {@link String}s of compressor types supported by the SDK. + */ + public static Set compressorTypes() { + return compressorMap.keySet(); + } + + /** + * Whether or not the compressor type is supported by the SDK. 
+ */ + public static boolean isSupported(String compressionType) { + return compressorTypes().contains(compressionType); + } + + /** + * Maps the {@link CompressorType} to its corresponding {@link Compressor}. + */ + public Compressor newCompressor() { + Compressor compressor = compressorMap.getOrDefault(this.id, null); + if (compressor == null) { + throw new UnsupportedOperationException("The compression type " + id + " does not have an implementation of " + + "Compressor"); + } + return compressor; + } + + @Override + public String toString() { + return id; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CompressorType that = (CompressorType) o; + return Objects.equals(id, that.id) + && Objects.equals(compressorMap, that.compressorMap); + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + (compressorMap != null ? compressorMap.hashCode() : 0); + return result; + } + + private static class CompressorTypeCache { + private static final ConcurrentHashMap VALUES = new ConcurrentHashMap<>(); + + private CompressorTypeCache() { + } + + private static CompressorType put(String value) { + return VALUES.computeIfAbsent(value, v -> new CompressorType(value)); + } + } +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java new file mode 100644 index 00000000000..12396c18cb3 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/compression/GzipCompressor.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.compression; + +import static software.amazon.awssdk.utils.IoUtils.closeQuietly; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.zip.GZIPOutputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.compression.Compressor; + +@SdkInternalApi +public final class GzipCompressor implements Compressor { + + private static final String COMPRESSOR_TYPE = "gzip"; + private static final Logger log = LoggerFactory.getLogger(GzipCompressor.class); + + @Override + public String compressorType() { + return COMPRESSOR_TYPE; + } + + @Override + public SdkBytes compress(SdkBytes content) { + GZIPOutputStream gzipOutputStream = null; + try { + ByteArrayOutputStream compressedOutputStream = new ByteArrayOutputStream(); + gzipOutputStream = new GZIPOutputStream(compressedOutputStream); + gzipOutputStream.write(content.asByteArray()); + gzipOutputStream.close(); + return SdkBytes.fromByteArray(compressedOutputStream.toByteArray()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + closeQuietly(gzipOutputStream, log); + } + } +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java index 766b998fa71..0a7e7338ed3 100644 --- 
a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java @@ -38,6 +38,7 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncExecutionFailureExceptionReportingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncRetryableStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.AsyncSigningStage; +import software.amazon.awssdk.core.internal.http.pipeline.stages.CompressRequestStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.HttpChecksumStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeAsyncHttpRequestStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeRequestImmutableStage; @@ -171,6 +172,7 @@ public CompletableFuture execute( .then(ApplyUserAgentStage::new) .then(MergeCustomHeadersStage::new) .then(MergeCustomQueryParamsStage::new) + .then(CompressRequestStage::new) .then(() -> new HttpChecksumStage(ClientType.ASYNC)) .then(MakeRequestImmutableStage::new) .then(RequestPipelineBuilder diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java index 75cab29c6f5..d465f7636f1 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java @@ -36,6 +36,7 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyUserAgentStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.BeforeTransmissionExecutionInterceptorsStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.BeforeUnmarshallingExecutionInterceptorsStage; +import 
software.amazon.awssdk.core.internal.http.pipeline.stages.CompressRequestStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.ExecutionFailureExceptionReportingStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.HandleResponseStage; import software.amazon.awssdk.core.internal.http.pipeline.stages.HttpChecksumStage; @@ -172,6 +173,7 @@ public OutputT execute(HttpResponseHandler> response .then(ApplyUserAgentStage::new) .then(MergeCustomHeadersStage::new) .then(MergeCustomQueryParamsStage::new) + .then(CompressRequestStage::new) .then(() -> new HttpChecksumStage(ClientType.SYNC)) .then(MakeRequestImmutableStage::new) // End of mutating request diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java new file mode 100644 index 00000000000..af3cf2dd281 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java @@ -0,0 +1,221 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.http.pipeline.stages; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.Locale; +import java.util.Optional; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.compression.Compressor; +import software.amazon.awssdk.core.compression.CompressorType; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.http.RequestExecutionContext; +import software.amazon.awssdk.core.internal.http.pipeline.MutableRequestToRequestPipeline; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.profiles.ProfileProperty; +import software.amazon.awssdk.utils.IoUtils; + +/** + * Compress requests whose operations are marked with the "requestCompression" C2J trait. 
+ */ +@SdkInternalApi +public class CompressRequestStage implements MutableRequestToRequestPipeline { + private static final int DEFAULT_MIN_COMPRESSION_SIZE = 10_240; + private static final int MIN_COMPRESSION_SIZE_LIMIT = 10_485_760; + private static final Supplier PROFILE_FILE = ProfileFile::defaultProfileFile; + private static final String PROFILE_NAME = ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow(); + + @Override + public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, RequestExecutionContext context) + throws Exception { + + if (!shouldCompress(input, context)) { + return input; + } + + Compressor compressor = resolveCompressorType(context.executionAttributes()); + + // non-streaming + if (!isStreaming(context)) { + compressEntirePayload(input, compressor); + updateContentEncodingHeader(input, compressor); + updateContentLengthHeader(input); + } + + // TODO : streaming - sync & async + + return input; + } + + private static boolean shouldCompress(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { + if (context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION) == null) { + return false; + } + if (resolveCompressorType(context.executionAttributes()) == null) { + return false; + } + if (!resolveRequestCompressionEnabled(context)) { + return false; + } + if (isStreaming(context)) { + return true; + } + if (input.contentStreamProvider() == null) { + return false; + } + return isRequestSizeWithinThreshold(input, context); + } + + private static boolean isStreaming(RequestExecutionContext context) { + return context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION).isStreaming(); + } + + private void compressEntirePayload(SdkHttpFullRequest.Builder input, Compressor compressor) { + ContentStreamProvider wrappedProvider = input.contentStreamProvider(); + ContentStreamProvider compressedStreamProvider = () -> 
compressor.compress(wrappedProvider.newStream()); + input.contentStreamProvider(compressedStreamProvider); + } + + private static void updateContentEncodingHeader(SdkHttpFullRequest.Builder input, + Compressor compressor) { + if (input.firstMatchingHeader("Content-encoding").isPresent()) { + input.appendHeader("Content-encoding", compressor.compressorType()); + } else { + input.putHeader("Content-encoding", compressor.compressorType()); + } + } + + private static void updateContentLengthHeader(SdkHttpFullRequest.Builder input) { + InputStream inputStream = input.contentStreamProvider().newStream(); + try { + byte[] bytes = IoUtils.toByteArray(inputStream); + String length = String.valueOf(bytes.length); + input.putHeader("Content-Length", length); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static Compressor resolveCompressorType(ExecutionAttributes executionAttributes) { + List encodings = + executionAttributes.getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION).getEncodings(); + + for (String encoding: encodings) { + encoding = encoding.toLowerCase(Locale.ROOT); + if (CompressorType.isSupported(encoding)) { + return CompressorType.of(encoding).newCompressor(); + } + } + return null; + } + + private static boolean resolveRequestCompressionEnabled(RequestExecutionContext context) { + + if (context.originalRequest().overrideConfiguration().isPresent() + && context.originalRequest().overrideConfiguration().get().requestCompressionConfiguration().isPresent()) { + Boolean requestCompressionEnabled = context.originalRequest().overrideConfiguration().get() + .requestCompressionConfiguration().get() + .requestCompressionEnabled(); + if (requestCompressionEnabled != null) { + return requestCompressionEnabled; + } + } + + if (context.executionAttributes().getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) != null) { + Boolean requestCompressionEnabled = context.executionAttributes().getAttribute( + 
SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION).requestCompressionEnabled(); + if (requestCompressionEnabled != null) { + return requestCompressionEnabled; + } + } + + if (SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue().isPresent()) { + return !SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue().get(); + } + + Optional profileSetting = + PROFILE_FILE.get() + .profile(PROFILE_NAME) + .flatMap(p -> p.booleanProperty(ProfileProperty.DISABLE_REQUEST_COMPRESSION)); + if (profileSetting.isPresent()) { + return !profileSetting.get(); + } + + return true; + } + + private static boolean isRequestSizeWithinThreshold(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { + int minimumCompressionThreshold = resolveMinCompressionSize(context); + validateMinCompressionSizeInput(minimumCompressionThreshold); + int requestSize = SdkBytes.fromInputStream(input.contentStreamProvider().newStream()).asByteArray().length; + return requestSize >= minimumCompressionThreshold; + } + + private static int resolveMinCompressionSize(RequestExecutionContext context) { + + if (context.originalRequest().overrideConfiguration().isPresent() + && context.originalRequest().overrideConfiguration().get().requestCompressionConfiguration().isPresent()) { + Integer minCompressionSize = context.originalRequest().overrideConfiguration().get() + .requestCompressionConfiguration().get() + .minimumCompressionThresholdInBytes(); + if (minCompressionSize != null) { + return minCompressionSize; + } + } + + if (context.executionAttributes().getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) != null) { + Integer minCompressionSize = context.executionAttributes() + .getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) + .minimumCompressionThresholdInBytes(); + if (minCompressionSize != null) { + return minCompressionSize; + } + } + + if (SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue().isPresent()) 
{ + return SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue().get(); + } + + Optional profileSetting = + PROFILE_FILE.get() + .profile(PROFILE_NAME) + .flatMap(p -> p.property(ProfileProperty.REQUEST_MIN_COMPRESSION_SIZE_BYTES)); + if (profileSetting.isPresent()) { + return Integer.parseInt(profileSetting.get()); + } + + return DEFAULT_MIN_COMPRESSION_SIZE; + } + + private static void validateMinCompressionSizeInput(int minCompressionSize) { + if (!(minCompressionSize >= 0 && minCompressionSize <= MIN_COMPRESSION_SIZE_LIMIT)) { + throw SdkClientException.create("The minimum compression size must be non-negative with a maximum value of " + + "10485760.", new IllegalArgumentException()); + } + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/RequestCompressionConfigurationTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/RequestCompressionConfigurationTest.java new file mode 100644 index 00000000000..2740e9c4b14 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/RequestCompressionConfigurationTest.java @@ -0,0 +1,43 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class RequestCompressionConfigurationTest { + + @Test + public void equalsHashcode() { + EqualsVerifier.forClass(RequestCompressionConfiguration.class) + .withNonnullFields("requestCompressionEnabled", "minimumCompressionThresholdInBytes") + .verify(); + } + + @Test + public void toBuilder() { + RequestCompressionConfiguration configuration = + RequestCompressionConfiguration.builder() + .requestCompressionEnabled(true) + .minimumCompressionThresholdInBytes(99999) + .build(); + + RequestCompressionConfiguration another = configuration.toBuilder().build(); + assertThat(configuration).isEqualTo(another); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java new file mode 100644 index 00000000000..c0c0af4bdaa --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.compression; + +import static org.assertj.core.api.Assertions.assertThat; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class CompressorTypeTest { + + @Test + public void equalsHashcode() { + EqualsVerifier.forClass(CompressorType.class) + .withNonnullFields("id") + .verify(); + } + + @Test + public void compressorType_gzip() { + CompressorType gzip = CompressorType.GZIP; + CompressorType gzipFromString = CompressorType.of("gzip"); + assertThat(gzip).isSameAs(gzipFromString); + assertThat(gzip).isEqualTo(gzipFromString); + } + + @Test + public void compressorType_usesSameInstance_when_sameCompressorTypeOfSameValue() { + CompressorType brotliFromString = CompressorType.of("brotli"); + CompressorType brotliFromStringDuplicate = CompressorType.of("brotli"); + assertThat(brotliFromString).isSameAs(brotliFromStringDuplicate); + assertThat(brotliFromString).isEqualTo(brotliFromStringDuplicate); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java new file mode 100644 index 00000000000..b9b4d7d77de --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java @@ -0,0 +1,57 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.compression; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.core.Is.is; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.zip.GZIPInputStream; +import org.junit.Test; +import software.amazon.awssdk.core.compression.Compressor; + +public class GzipCompressorTest { + private static final Compressor gzipCompressor = new GzipCompressor(); + private static final String COMPRESSABLE_STRING = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + + @Test + public void compressedData_decompressesCorrectly() throws IOException { + byte[] originalData = COMPRESSABLE_STRING.getBytes(StandardCharsets.UTF_8); + byte[] compressedData = gzipCompressor.compress(originalData); + + int uncompressedSize = originalData.length; + int compressedSize = compressedData.length; + assertThat(compressedSize, lessThan(uncompressedSize)); + + ByteArrayInputStream bais = new ByteArrayInputStream(compressedData); + GZIPInputStream gzipInputStream = new GZIPInputStream(bais); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int bytesRead; + while ((bytesRead = gzipInputStream.read(buffer)) != -1) { + baos.write(buffer, 0, bytesRead); + } + gzipInputStream.close(); + byte[] decompressedData = baos.toByteArray(); + + assertThat(decompressedData, is(originalData)); + } +} diff --git a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java index 01722140044..e530b0fcae3 100644 --- 
a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java +++ b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java @@ -39,8 +39,12 @@ import org.junit.BeforeClass; import org.junit.Test; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.RequestCompressionConfiguration; import software.amazon.awssdk.core.SdkGlobalTime; import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.interceptor.trait.RequestCompression; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.cloudwatch.model.Datapoint; import software.amazon.awssdk.services.cloudwatch.model.DeleteAlarmsRequest; @@ -108,7 +112,6 @@ public static void cleanupAlarms() { /** * Tests putting metrics and then getting them back. */ - @Test public void put_get_metricdata_list_metric_returns_success() throws InterruptedException { @@ -164,6 +167,87 @@ public void put_get_metricdata_list_metric_returns_success() throws assertTrue(seenDimensions); } + /** + * Tests putting metrics with request compression and then getting them back. 
+ * TODO: We can remove this test once CloudWatch adds "RequestCompression" trait to PutMetricData + */ + @Test + public void put_get_metricdata_list_metric_withRequestCompression_returns_success() { + + RequestCompression requestCompressionTrait = RequestCompression.builder() + .encodings("gzip") + .isStreaming(false) + .build(); + RequestCompressionConfiguration compressionConfiguration = + RequestCompressionConfiguration.builder() + // uncompressed payload is 404 bytes + .minimumCompressionThresholdInBytes(100) + .build(); + + CloudWatchClient requestCompressionClient = + CloudWatchClient.builder() + .credentialsProvider(getCredentialsProvider()) + .region(Region.US_WEST_2) + .overrideConfiguration(c -> c.putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + requestCompressionTrait) + .putExecutionAttribute( + SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, + compressionConfiguration)) + .build(); + + String measureName = this.getClass().getName() + System.currentTimeMillis(); + + MetricDatum datum = MetricDatum.builder().dimensions( + Dimension.builder().name("InstanceType").value("m1.small").build()) + .metricName(measureName).timestamp(Instant.now()) + .unit("Count").value(42.0).build(); + + requestCompressionClient.putMetricData(PutMetricDataRequest.builder() + .namespace("AWS.EC2").metricData(datum).build()); + + GetMetricStatisticsResponse result = + Waiter.run(() -> requestCompressionClient + .getMetricStatistics(r -> r.startTime(Instant.now().minus(Duration.ofDays(7))) + .namespace("AWS.EC2") + .period(60 * 60) + .dimensions(Dimension.builder().name("InstanceType") + .value("m1.small").build()) + .metricName(measureName) + .statisticsWithStrings("Average", "Maximum", "Minimum", "Sum") + .endTime(Instant.now()))) + .until(r -> r.datapoints().size() == 1) + .orFailAfter(Duration.ofMinutes(2)); + + assertNotNull(result.label()); + assertEquals(measureName, result.label()); + + assertEquals(1, result.datapoints().size()); + for 
(Datapoint datapoint : result.datapoints()) { + assertEquals(datum.value(), datapoint.average()); + assertEquals(datum.value(), datapoint.maximum()); + assertEquals(datum.value(), datapoint.minimum()); + assertEquals(datum.value(), datapoint.sum()); + assertNotNull(datapoint.timestamp()); + assertEquals(datum.unit(), datapoint.unit()); + } + + ListMetricsResponse listResult = requestCompressionClient.listMetrics(ListMetricsRequest.builder().build()); + + boolean seenDimensions = false; + assertTrue(listResult.metrics().size() > 0); + for (Metric metric : listResult.metrics()) { + assertNotNull(metric.metricName()); + assertNotNull(metric.namespace()); + + for (Dimension dimension : metric.dimensions()) { + seenDimensions = true; + assertNotNull(dimension.name()); + assertNotNull(dimension.value()); + } + } + assertTrue(seenDimensions); + } + /** * Tests setting the state for an alarm and reading its history. */ diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json index 6b1cb368d48..b1f994fd1d4 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json @@ -277,6 +277,18 @@ "requestAlgorithmMember": "ChecksumAlgorithm" } }, + "PutOperationWithRequestCompression":{ + "name":"PutOperationWithRequestCompression", + "http":{ + "method":"PUT", + "requestUri":"/" + }, + "input":{"shape":"RequestCompressionStructure"}, + "output":{"shape":"RequestCompressionStructure"}, + "requestCompression": { + "encodings": ["gzip"] + } + }, "GetOperationWithChecksum":{ "name":"GetOperationWithChecksum", "http":{ @@ -1007,6 +1019,17 @@ } }, "payload":"NestedQueryParameterOperation" + }, + "RequestCompressionStructure":{ + 
"type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "documentation":"

Object data.

", + "streaming":false + } + }, + "payload":"Body" } } } diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java new file mode 100644 index 00000000000..815e6ad3291 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java @@ -0,0 +1,179 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.InputStream; +import java.time.Duration; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.AfterEach; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithRequestCompressionRequest; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; + +public class RequestCompressionTest { + private static final String UNCOMPRESSED_BODY = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + private String compressedBody; + private int compressedLen; + MockSyncHttpClient mockHttpClient; + MockAsyncHttpClient mockAsyncHttpClient; + ProtocolRestJsonClient syncClient; + ProtocolRestJsonAsyncClient asyncClient; + Compressor compressor; + + @BeforeEach + public void setUp() { + mockHttpClient = new MockSyncHttpClient(); + mockAsyncHttpClient = new MockAsyncHttpClient(); + syncClient = ProtocolRestJsonClient.builder() + .credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.US_EAST_1) + .httpClient(mockHttpClient) + .build(); + asyncClient = 
ProtocolRestJsonAsyncClient.builder() + .credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.US_EAST_1) + .httpClient(mockAsyncHttpClient) + .build(); + compressor = new GzipCompressor(); + byte[] compressedBodyBytes = compressor.compress(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)).asByteArray(); + compressedLen = compressedBodyBytes.length; + compressedBody = new String(compressedBodyBytes); + } + + @AfterEach + public void reset() { + mockHttpClient.reset(); + mockAsyncHttpClient.reset(); + } + + @Test + public void sync_nonStreaming_compression_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.requestCompressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + syncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void async_nonStreaming_compression_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.requestCompressionConfiguration( + c -> 
c.minimumCompressionThresholdInBytes(1))) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void sync_nonStreaming_compression_withRetry_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.requestCompressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + syncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void async_nonStreaming_compression_withRetry_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockErrorResponse(), 
Duration.ofMillis(500)); + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.requestCompressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + private HttpExecuteResponse mockResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + } + + private HttpExecuteResponse mockErrorResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(500).build()) + .build(); + } +} From 1dff66733b3fdc0253895a3a5f56fbe9ca2ecc60 Mon Sep 17 00:00:00 2001 From: hdavidh Date: Mon, 17 Jul 2023 14:11:21 -0700 Subject: [PATCH 04/17] Throw codegen error for streaming operations with request compression --- .../codegen/poet/client/traits/RequestCompressionTrait.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java index 341feeb0f50..57f89f96cc0 100644 --- 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java @@ -43,6 +43,12 @@ public static CodeBlock create(OperationModel operationModel) { return CodeBlock.of(""); } + // TODO : remove once request compression for streaming operations is supported + if (operationModel.isStreaming()) { + throw new IllegalStateException("Request compression for streaming operations is not yet supported in the AWS SDK " + + "for Java."); + } + List encodings = operationModel.getRequestCompression().getEncodings(); return CodeBlock.builder() From c22db110810bc9fa6ecd77d16bb45f01e5a38bdb Mon Sep 17 00:00:00 2001 From: hdavidh Date: Mon, 17 Jul 2023 14:11:44 -0700 Subject: [PATCH 05/17] Throw codegen error for S3 operations with request compression --- .../codegen/poet/client/specs/JsonProtocolSpec.java | 4 ++-- .../codegen/poet/client/specs/QueryProtocolSpec.java | 4 ++-- .../awssdk/codegen/poet/client/specs/XmlProtocolSpec.java | 4 ++-- .../poet/client/traits/RequestCompressionTrait.java | 8 +++++++- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java index 9db5ee5c7c6..44922d4e2b3 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java @@ -189,7 +189,7 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) .add(NoneAuthTypeRequestTrait.create(opModel)) - .add(RequestCompressionTrait.create(opModel)); + .add(RequestCompressionTrait.create(opModel, model)); if 
(opModel.hasStreamingInput()) { codeBlock.add(".withRequestBody(requestBody)") @@ -259,7 +259,7 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) .add(NoneAuthTypeRequestTrait.create(opModel)) - .add(RequestCompressionTrait.create(opModel)) + .add(RequestCompressionTrait.create(opModel, model)) .add(".withInput($L)$L);", opModel.getInput().getVariableName(), asyncResponseTransformerVariable(isStreaming, isRestJson, opModel)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java index faacdefcd94..daef19b9def 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java @@ -118,7 +118,7 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) .add(NoneAuthTypeRequestTrait.create(opModel)) - .add(RequestCompressionTrait.create(opModel)); + .add(RequestCompressionTrait.create(opModel, intermediateModel)); if (opModel.hasStreamingInput()) { @@ -154,7 +154,7 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) .add(NoneAuthTypeRequestTrait.create(opModel)) - .add(RequestCompressionTrait.create(opModel)); + .add(RequestCompressionTrait.create(opModel, intermediateModel)); builder.add(hostPrefixExpression(opModel) + asyncRequestBody + ".withInput($L)$L);", diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java 
b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java index 6374d8accc3..3f58b49edc7 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/XmlProtocolSpec.java @@ -137,7 +137,7 @@ public CodeBlock executionHandler(OperationModel opModel) { .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) .add(NoneAuthTypeRequestTrait.create(opModel)) - .add(RequestCompressionTrait.create(opModel)); + .add(RequestCompressionTrait.create(opModel, model)); s3ArnableFields(opModel, model).ifPresent(codeBlock::add); @@ -216,7 +216,7 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper .add(HttpChecksumRequiredTrait.putHttpChecksumAttribute(opModel)) .add(HttpChecksumTrait.create(opModel)) .add(NoneAuthTypeRequestTrait.create(opModel)) - .add(RequestCompressionTrait.create(opModel)); + .add(RequestCompressionTrait.create(opModel, model)); s3ArnableFields(opModel, model).ifPresent(builder::add); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java index 57f89f96cc0..6e09232ade2 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java @@ -18,6 +18,7 @@ import com.squareup.javapoet.CodeBlock; import java.util.List; import java.util.stream.Collectors; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.core.client.config.SdkClientOption; import 
software.amazon.awssdk.core.client.handler.ClientExecutionParams; @@ -38,7 +39,7 @@ private RequestCompressionTrait() { * context of initializing {@link ClientExecutionParams}. If request compression is not required by the operation, this will * return an empty code-block. */ - public static CodeBlock create(OperationModel operationModel) { + public static CodeBlock create(OperationModel operationModel, IntermediateModel model) { if (operationModel.getRequestCompression() == null) { return CodeBlock.of(""); } @@ -49,6 +50,11 @@ public static CodeBlock create(OperationModel operationModel) { + "for Java."); } + // TODO : remove once S3 checksum interceptors are moved to occur after CompressRequestStage + if (model.getMetadata().getServiceName().equals("S3")) { + throw new IllegalStateException("Request compression for S3 is not yet supported in the AWS SDK for Java."); + } + List encodings = operationModel.getRequestCompression().getEncodings(); return CodeBlock.builder() From 6dc359d0a754071e8645fd440d4104b7293d4e98 Mon Sep 17 00:00:00 2001 From: hdavidh Date: Mon, 17 Jul 2023 14:29:21 -0700 Subject: [PATCH 06/17] Changelog entry --- .changes/next-release/feature-AWSSDKforJavav2-e418083.json | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .changes/next-release/feature-AWSSDKforJavav2-e418083.json diff --git a/.changes/next-release/feature-AWSSDKforJavav2-e418083.json b/.changes/next-release/feature-AWSSDKforJavav2-e418083.json new file mode 100644 index 00000000000..e379cd4ce79 --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-e418083.json @@ -0,0 +1,6 @@ +{ + "category": "AWS SDK for Java v2", + "contributor": "", + "type": "feature", + "description": "Adding support for \"requestCompression\" trait to GZIP compress payloads for non-streaming operations." 
+} From b1e4d0c0cf96f312ee33a3ae1bffa3641ed1f618 Mon Sep 17 00:00:00 2001 From: hdavidh Date: Tue, 18 Jul 2023 12:09:42 -0700 Subject: [PATCH 07/17] Refactoring --- .../core/RequestCompressionConfiguration.java | 3 +- .../compression/Compressor.java | 2 +- .../compression/CompressorType.java | 3 +- .../internal/compression/GzipCompressor.java | 1 - .../pipeline/stages/CompressRequestStage.java | 90 ++++++++++++------- .../core/compression/CompressorTypeTest.java | 1 + .../compression/GzipCompressorTest.java | 1 - .../services/RequestCompressionTest.java | 2 +- 8 files changed, 66 insertions(+), 37 deletions(-) rename core/sdk-core/src/main/java/software/amazon/awssdk/core/{ => internal}/compression/Compressor.java (97%) rename core/sdk-core/src/main/java/software/amazon/awssdk/core/{ => internal}/compression/CompressorType.java (96%) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java index e4124024e6f..2eec4a76d6f 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java @@ -98,7 +98,8 @@ public interface Builder extends CopyableBuilder PROFILE_FILE = ProfileFile::defaultProfileFile; private static final String PROFILE_NAME = ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow(); + private static Boolean compressionEnabledClientLevel; + private static Boolean compressionEnabledEnvLevel; + private static Boolean compressionEnabledProfileLevel; + private static int minCompressionSizeClientLevel = -1; + private static int minCompressionSizeEnvLevel = -1; + private static int minCompressionSizeProfileLevel = -1; + @Override public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, RequestExecutionContext context) @@ -136,34 +145,44 @@ private static 
Compressor resolveCompressorType(ExecutionAttributes executionAtt private static boolean resolveRequestCompressionEnabled(RequestExecutionContext context) { - if (context.originalRequest().overrideConfiguration().isPresent() - && context.originalRequest().overrideConfiguration().get().requestCompressionConfiguration().isPresent()) { - Boolean requestCompressionEnabled = context.originalRequest().overrideConfiguration().get() - .requestCompressionConfiguration().get() - .requestCompressionEnabled(); - if (requestCompressionEnabled != null) { - return requestCompressionEnabled; - } + Optional requestCompressionEnabledRequestLevel = + context.originalRequest().overrideConfiguration() + .flatMap(RequestOverrideConfiguration::requestCompressionConfiguration) + .map(RequestCompressionConfiguration::requestCompressionEnabled); + if (requestCompressionEnabledRequestLevel.isPresent()) { + return requestCompressionEnabledRequestLevel.get(); } + if (compressionEnabledClientLevel != null) { + return compressionEnabledClientLevel; + } if (context.executionAttributes().getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) != null) { - Boolean requestCompressionEnabled = context.executionAttributes().getAttribute( + Boolean requestCompressionEnabledClientLevel = context.executionAttributes().getAttribute( SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION).requestCompressionEnabled(); - if (requestCompressionEnabled != null) { - return requestCompressionEnabled; + if (requestCompressionEnabledClientLevel != null) { + compressionEnabledClientLevel = requestCompressionEnabledClientLevel; + return compressionEnabledClientLevel; } } + if (compressionEnabledEnvLevel != null) { + return compressionEnabledEnvLevel; + } if (SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue().isPresent()) { - return !SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue().get(); + compressionEnabledEnvLevel = 
!SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue().get(); + return compressionEnabledEnvLevel; } + if (compressionEnabledProfileLevel != null) { + return compressionEnabledProfileLevel; + } Optional profileSetting = PROFILE_FILE.get() .profile(PROFILE_NAME) .flatMap(p -> p.booleanProperty(ProfileProperty.DISABLE_REQUEST_COMPRESSION)); if (profileSetting.isPresent()) { - return !profileSetting.get(); + compressionEnabledProfileLevel = !profileSetting.get(); + return compressionEnabledProfileLevel; } return true; @@ -178,35 +197,46 @@ private static boolean isRequestSizeWithinThreshold(SdkHttpFullRequest.Builder i private static int resolveMinCompressionSize(RequestExecutionContext context) { - if (context.originalRequest().overrideConfiguration().isPresent() - && context.originalRequest().overrideConfiguration().get().requestCompressionConfiguration().isPresent()) { - Integer minCompressionSize = context.originalRequest().overrideConfiguration().get() - .requestCompressionConfiguration().get() - .minimumCompressionThresholdInBytes(); - if (minCompressionSize != null) { - return minCompressionSize; - } + Optional minimumCompressionSizeRequestLevel = + context.originalRequest().overrideConfiguration() + .flatMap(RequestOverrideConfiguration::requestCompressionConfiguration) + .map(RequestCompressionConfiguration::minimumCompressionThresholdInBytes); + if (minimumCompressionSizeRequestLevel.isPresent()) { + return minimumCompressionSizeRequestLevel.get(); } + if (minCompressionSizeClientLevel >= 0) { + return minCompressionSizeClientLevel; + } if (context.executionAttributes().getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) != null) { - Integer minCompressionSize = context.executionAttributes() - .getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) - .minimumCompressionThresholdInBytes(); - if (minCompressionSize != null) { - return minCompressionSize; + Integer minimumCompressionSizeClientLevel = + 
context.executionAttributes() + .getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) + .minimumCompressionThresholdInBytes(); + if (minimumCompressionSizeClientLevel != null) { + minCompressionSizeClientLevel = minimumCompressionSizeClientLevel; + return minCompressionSizeClientLevel; } } + if (minCompressionSizeEnvLevel >= 0) { + return minCompressionSizeEnvLevel; + } if (SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue().isPresent()) { - return SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue().get(); + minCompressionSizeEnvLevel = SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue().get(); + return minCompressionSizeEnvLevel; } + if (minCompressionSizeProfileLevel >= 0) { + return minCompressionSizeProfileLevel; + } Optional profileSetting = PROFILE_FILE.get() .profile(PROFILE_NAME) .flatMap(p -> p.property(ProfileProperty.REQUEST_MIN_COMPRESSION_SIZE_BYTES)); if (profileSetting.isPresent()) { - return Integer.parseInt(profileSetting.get()); + minCompressionSizeProfileLevel = Integer.parseInt(profileSetting.get()); + return minCompressionSizeProfileLevel; } return DEFAULT_MIN_COMPRESSION_SIZE; diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java index c0c0af4bdaa..f67315b8e5d 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/compression/CompressorTypeTest.java @@ -19,6 +19,7 @@ import nl.jqno.equalsverifier.EqualsVerifier; import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.internal.compression.CompressorType; public class CompressorTypeTest { diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java index b9b4d7d77de..24fb71940f6 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/compression/GzipCompressorTest.java @@ -25,7 +25,6 @@ import java.nio.charset.StandardCharsets; import java.util.zip.GZIPInputStream; import org.junit.Test; -import software.amazon.awssdk.core.compression.Compressor; public class GzipCompressorTest { private static final Compressor gzipCompressor = new GzipCompressor(); diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java index 815e6ad3291..1aacc4de3e1 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java @@ -24,7 +24,7 @@ import org.junit.jupiter.api.AfterEach; import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.core.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.Compressor; import software.amazon.awssdk.core.internal.compression.GzipCompressor; import software.amazon.awssdk.http.HttpExecuteResponse; import software.amazon.awssdk.http.SdkHttpFullRequest; From 942a5ad097ac9c80fb03abcdf10405b465b674a6 Mon Sep 17 00:00:00 2001 From: hdavidh Date: Thu, 20 Jul 2023 09:26:10 -0700 Subject: [PATCH 08/17] Refactoring --- .../awssdk/services/RequestCompressionTest.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git 
a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java index 1aacc4de3e1..e66f5f47bd1 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java @@ -41,11 +41,11 @@ public class RequestCompressionTest { "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; private String compressedBody; private int compressedLen; - MockSyncHttpClient mockHttpClient; - MockAsyncHttpClient mockAsyncHttpClient; - ProtocolRestJsonClient syncClient; - ProtocolRestJsonAsyncClient asyncClient; - Compressor compressor; + private MockSyncHttpClient mockHttpClient; + private MockAsyncHttpClient mockAsyncHttpClient; + private ProtocolRestJsonClient syncClient; + private ProtocolRestJsonAsyncClient asyncClient; + private Compressor compressor; @BeforeEach public void setUp() { @@ -143,7 +143,7 @@ public void sync_nonStreaming_compression_withRetry_compressesCorrectly() { @Test public void async_nonStreaming_compression_withRetry_compressesCorrectly() { - mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + mockAsyncHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); PutOperationWithRequestCompressionRequest request = From 8364ba40e6363d6d80d64e0878f7db57cbe934ae Mon Sep 17 00:00:00 2001 From: hdavidh Date: Thu, 20 Jul 2023 12:42:32 -0700 Subject: [PATCH 09/17] Move interceptor trait to internal module --- .../codegen/poet/client/traits/RequestCompressionTrait.java | 2 +- .../codegen/poet/client/test-aws-json-async-client-class.java | 2 +- 
.../codegen/poet/client/test-json-async-client-class.java | 2 +- .../awssdk/codegen/poet/client/test-json-client-class.java | 2 +- .../codegen/poet/client/test-query-async-client-class.java | 2 +- .../awssdk/codegen/poet/client/test-query-client-class.java | 2 +- .../awssdk/codegen/poet/client/test-xml-async-client-class.java | 2 +- .../awssdk/codegen/poet/client/test-xml-client-class.java | 2 +- .../awssdk/core/interceptor/SdkInternalExecutionAttribute.java | 2 +- .../{ => internal}/interceptor/trait/RequestCompression.java | 2 +- .../awssdk/services/cloudwatch/CloudWatchIntegrationTest.java | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) rename core/sdk-core/src/main/java/software/amazon/awssdk/core/{ => internal}/interceptor/trait/RequestCompression.java (97%) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java index 6e09232ade2..102e1843d6e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java @@ -24,7 +24,7 @@ import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; -import software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; /** * The logic for handling the "requestCompression" trait within the code generator. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java index d7447337ee2..e3e0accafef 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java @@ -40,7 +40,7 @@ import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; -import software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java index 85ea70c1c3e..227de830803 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java @@ -44,7 +44,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; -import software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import 
software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.protocol.VoidSdkResponse; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java index f3b0920472f..4cb9f1a16e5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java @@ -22,7 +22,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; -import software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java index 8b5ad5d8bc6..b4debbf3eb5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java @@ -30,7 +30,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; -import 
software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java index 1c4c1af4b10..52d7dc7863a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java @@ -21,7 +21,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; -import software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java index a4745ebebe3..026c38a867c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java @@ -36,7 +36,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import 
software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; -import software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.AsyncStreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java index 0cc6ae02423..c96b3442c34 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java @@ -22,7 +22,7 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; -import software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller; import software.amazon.awssdk.core.signer.Signer; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java index f0d51c8cb32..75e999bc102 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java @@ 
-18,7 +18,7 @@ import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; -import software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.endpoints.Endpoint; import software.amazon.awssdk.endpoints.EndpointProvider; import software.amazon.awssdk.http.SdkHttpExecutionAttributes; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/RequestCompression.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java similarity index 97% rename from core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/RequestCompression.java rename to core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java index ecb452aed30..5be35f0ae46 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/trait/RequestCompression.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/interceptor/trait/RequestCompression.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.core.interceptor.trait; +package software.amazon.awssdk.core.internal.interceptor.trait; import java.util.Arrays; import java.util.List; diff --git a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java index e530b0fcae3..957c1c74c08 100644 --- a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java +++ b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java @@ -44,7 +44,7 @@ import software.amazon.awssdk.core.exception.SdkServiceException; import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; -import software.amazon.awssdk.core.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.cloudwatch.model.Datapoint; import software.amazon.awssdk.services.cloudwatch.model.DeleteAlarmsRequest; From b95a7212b6fefb2d3610e574c79b2af342ad3ce7 Mon Sep 17 00:00:00 2001 From: hdavidh Date: Thu, 20 Jul 2023 12:42:53 -0700 Subject: [PATCH 10/17] Update javadoc --- .../amazon/awssdk/core/RequestCompressionConfiguration.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java index 2eec4a76d6f..4a6abf69f36 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/RequestCompressionConfiguration.java @@ -90,7 +90,8 @@ public int hashCode() { 
public interface Builder extends CopyableBuilder { /** - * Configures whether request compression is enabled or not. The default value is true. + * Configures whether request compression is enabled or not, for operations that have the "requestCompression" C2J trait. + * The default value is true. * * @param requestCompressionEnabled * @return This object for method chaining. From 878b2e5aa289315d6be5f40beeeeaf5b84ec08d0 Mon Sep 17 00:00:00 2001 From: hdavidh Date: Thu, 20 Jul 2023 14:10:40 -0700 Subject: [PATCH 11/17] Move config resolution logic to SdkDefaultClientBuilder --- .../builder/SdkDefaultClientBuilder.java | 59 ++++++++++++- .../pipeline/stages/CompressRequestStage.java | 86 +++---------------- 2 files changed, 65 insertions(+), 80 deletions(-) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 596caf82a0a..b76064c8d7e 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -64,6 +64,8 @@ import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.core.RequestCompressionConfiguration; +import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.client.config.ClientAsyncConfiguration; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -83,9 +85,11 @@ import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; import software.amazon.awssdk.metrics.MetricPublisher; +import 
software.amazon.awssdk.profiles.Profile; import software.amazon.awssdk.profiles.ProfileFile; import software.amazon.awssdk.profiles.ProfileFileSupplier; import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.profiles.ProfileProperty; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.Either; import software.amazon.awssdk.utils.ScheduledExecutorUtils; @@ -238,8 +242,6 @@ private SdkClientConfiguration setOverrides(SdkClientConfiguration configuration builder.option(METRIC_PUBLISHERS, clientOverrideConfiguration.metricPublishers()); builder.option(EXECUTION_ATTRIBUTES, clientOverrideConfiguration.executionAttributes()); builder.option(TOKEN_SIGNER, clientOverrideConfiguration.advancedOption(TOKEN_SIGNER).orElse(null)); - builder.option(REQUEST_COMPRESSION_CONFIGURATION, - clientOverrideConfiguration.requestCompressionConfiguration().orElse(null)); clientOverrideConfiguration.advancedOption(ENDPOINT_OVERRIDDEN_OVERRIDE).ifPresent(value -> { builder.option(ENDPOINT_OVERRIDDEN, value); @@ -318,9 +320,60 @@ private SdkClientConfiguration finalizeConfiguration(SdkClientConfiguration conf .option(EXECUTION_INTERCEPTORS, resolveExecutionInterceptors(config)) .option(RETRY_POLICY, retryPolicy) .option(CLIENT_USER_AGENT, resolveClientUserAgent(config, retryPolicy)) + .option(REQUEST_COMPRESSION_CONFIGURATION, resolveRequestCompressionConfiguration()) .build(); } + private RequestCompressionConfiguration resolveRequestCompressionConfiguration() { + Boolean requestCompressionEnabled = null; + Integer minCompressionThreshold = null; + + // Client level + RequestCompressionConfiguration clientConfig = + clientOverrideConfiguration.requestCompressionConfiguration().orElse(null); + if (clientConfig != null) { + requestCompressionEnabled = clientConfig.requestCompressionEnabled(); + minCompressionThreshold = clientConfig.minimumCompressionThresholdInBytes(); + } + + // Env level + if 
(requestCompressionEnabled == null) { + Optional systemSetting = SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue(); + if (systemSetting.isPresent()) { + requestCompressionEnabled = !systemSetting.get(); + } + } + if (minCompressionThreshold == null) { + minCompressionThreshold = + SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue().orElse(null); + } + + // Profile level + if (requestCompressionEnabled == null || minCompressionThreshold == null) { + Supplier profileFileSupplier = ProfileFile::defaultProfileFile; + String profileName = ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow(); + Profile profile = profileFileSupplier.get().profile(profileName).orElse(null); + + if (requestCompressionEnabled == null && profile != null) { + Optional profileSetting = profile.booleanProperty(ProfileProperty.DISABLE_REQUEST_COMPRESSION); + if (profileSetting.isPresent()) { + requestCompressionEnabled = !profileSetting.get(); + } + } + if (minCompressionThreshold == null && profile != null) { + Optional profileSetting = profile.property(ProfileProperty.REQUEST_MIN_COMPRESSION_SIZE_BYTES); + if (profileSetting.isPresent()) { + minCompressionThreshold = Integer.parseInt(profileSetting.get()); + } + } + } + + return RequestCompressionConfiguration.builder() + .requestCompressionEnabled(requestCompressionEnabled) + .minimumCompressionThresholdInBytes(minCompressionThreshold) + .build(); + } + private String resolveClientUserAgent(SdkClientConfiguration config, RetryPolicy retryPolicy) { return ApplyUserAgentStage.resolveClientUserAgent(config.option(USER_AGENT_PREFIX), config.option(INTERNAL_USER_AGENT), @@ -580,6 +633,4 @@ public void close() { // Do nothing, this client is managed by the customer. 
} } - - } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java index b49af152745..9df24fd004a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java @@ -21,12 +21,10 @@ import java.util.List; import java.util.Locale; import java.util.Optional; -import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.RequestCompressionConfiguration; import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; @@ -37,9 +35,6 @@ import software.amazon.awssdk.core.internal.http.pipeline.MutableRequestToRequestPipeline; import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.SdkHttpFullRequest; -import software.amazon.awssdk.profiles.ProfileFile; -import software.amazon.awssdk.profiles.ProfileFileSystemSetting; -import software.amazon.awssdk.profiles.ProfileProperty; import software.amazon.awssdk.utils.IoUtils; /** @@ -49,15 +44,6 @@ public class CompressRequestStage implements MutableRequestToRequestPipeline { private static final int DEFAULT_MIN_COMPRESSION_SIZE = 10_240; private static final int MIN_COMPRESSION_SIZE_LIMIT = 10_485_760; - private static final Supplier PROFILE_FILE = ProfileFile::defaultProfileFile; - private static final String PROFILE_NAME = 
ProfileFileSystemSetting.AWS_PROFILE.getStringValueOrThrow(); - private static Boolean compressionEnabledClientLevel; - private static Boolean compressionEnabledEnvLevel; - private static Boolean compressionEnabledProfileLevel; - private static int minCompressionSizeClientLevel = -1; - private static int minCompressionSizeEnvLevel = -1; - private static int minCompressionSizeProfileLevel = -1; - @Override public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, RequestExecutionContext context) @@ -153,36 +139,11 @@ private static boolean resolveRequestCompressionEnabled(RequestExecutionContext return requestCompressionEnabledRequestLevel.get(); } - if (compressionEnabledClientLevel != null) { - return compressionEnabledClientLevel; - } - if (context.executionAttributes().getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) != null) { - Boolean requestCompressionEnabledClientLevel = context.executionAttributes().getAttribute( - SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION).requestCompressionEnabled(); - if (requestCompressionEnabledClientLevel != null) { - compressionEnabledClientLevel = requestCompressionEnabledClientLevel; - return compressionEnabledClientLevel; - } - } - - if (compressionEnabledEnvLevel != null) { - return compressionEnabledEnvLevel; - } - if (SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue().isPresent()) { - compressionEnabledEnvLevel = !SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue().get(); - return compressionEnabledEnvLevel; - } - - if (compressionEnabledProfileLevel != null) { - return compressionEnabledProfileLevel; - } - Optional profileSetting = - PROFILE_FILE.get() - .profile(PROFILE_NAME) - .flatMap(p -> p.booleanProperty(ProfileProperty.DISABLE_REQUEST_COMPRESSION)); - if (profileSetting.isPresent()) { - compressionEnabledProfileLevel = !profileSetting.get(); - return compressionEnabledProfileLevel; + Boolean isEnabled = context.executionAttributes() + 
.getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) + .requestCompressionEnabled(); + if (isEnabled != null) { + return isEnabled; } return true; @@ -205,38 +166,11 @@ private static int resolveMinCompressionSize(RequestExecutionContext context) { return minimumCompressionSizeRequestLevel.get(); } - if (minCompressionSizeClientLevel >= 0) { - return minCompressionSizeClientLevel; - } - if (context.executionAttributes().getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) != null) { - Integer minimumCompressionSizeClientLevel = - context.executionAttributes() - .getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) - .minimumCompressionThresholdInBytes(); - if (minimumCompressionSizeClientLevel != null) { - minCompressionSizeClientLevel = minimumCompressionSizeClientLevel; - return minCompressionSizeClientLevel; - } - } - - if (minCompressionSizeEnvLevel >= 0) { - return minCompressionSizeEnvLevel; - } - if (SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue().isPresent()) { - minCompressionSizeEnvLevel = SdkSystemSetting.AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES.getIntegerValue().get(); - return minCompressionSizeEnvLevel; - } - - if (minCompressionSizeProfileLevel >= 0) { - return minCompressionSizeProfileLevel; - } - Optional profileSetting = - PROFILE_FILE.get() - .profile(PROFILE_NAME) - .flatMap(p -> p.property(ProfileProperty.REQUEST_MIN_COMPRESSION_SIZE_BYTES)); - if (profileSetting.isPresent()) { - minCompressionSizeProfileLevel = Integer.parseInt(profileSetting.get()); - return minCompressionSizeProfileLevel; + Integer threshold = context.executionAttributes() + .getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) + .minimumCompressionThresholdInBytes(); + if (threshold != null) { + return threshold; } return DEFAULT_MIN_COMPRESSION_SIZE; From a78943f9db903f41661a69f1d39b039c7e72a14e Mon Sep 17 00:00:00 2001 From: hdavidh Date: Thu, 20 Jul 2023 14:54:07 -0700 Subject: 
[PATCH 12/17] Null check for clientOverrideConfiguration --- .../client/builder/SdkDefaultClientBuilder.java | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index b76064c8d7e..49b4f6a97b5 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -329,13 +329,16 @@ private RequestCompressionConfiguration resolveRequestCompressionConfiguration() Integer minCompressionThreshold = null; // Client level - RequestCompressionConfiguration clientConfig = - clientOverrideConfiguration.requestCompressionConfiguration().orElse(null); - if (clientConfig != null) { - requestCompressionEnabled = clientConfig.requestCompressionEnabled(); - minCompressionThreshold = clientConfig.minimumCompressionThresholdInBytes(); + if (clientOverrideConfiguration != null) { + RequestCompressionConfiguration clientConfig = + clientOverrideConfiguration.requestCompressionConfiguration().orElse(null); + if (clientConfig != null) { + requestCompressionEnabled = clientConfig.requestCompressionEnabled(); + minCompressionThreshold = clientConfig.minimumCompressionThresholdInBytes(); + } } + // Env level if (requestCompressionEnabled == null) { Optional systemSetting = SdkSystemSetting.AWS_DISABLE_REQUEST_COMPRESSION.getBooleanValue(); From ba69df67da5840a10c70f59e0702a95f2bf225d9 Mon Sep 17 00:00:00 2001 From: hdavidh Date: Thu, 20 Jul 2023 17:24:12 -0700 Subject: [PATCH 13/17] Remove unnecessary ExecutionAttribute --- .../traits/RequestCompressionTrait.java | 5 --- .../test-aws-json-async-client-class.java | 3 -- .../client/test-json-async-client-class.java | 3 -- .../poet/client/test-json-client-class.java | 
3 -- .../client/test-query-async-client-class.java | 3 -- .../poet/client/test-query-client-class.java | 3 -- .../client/test-xml-async-client-class.java | 3 -- .../poet/client/test-xml-client-class.java | 3 -- .../interceptor/SdkExecutionAttribute.java | 8 ----- .../internal/http/AmazonAsyncHttpClient.java | 2 +- .../internal/http/AmazonSyncHttpClient.java | 2 +- .../pipeline/stages/CompressRequestStage.java | 35 ++++++++++--------- .../cloudwatch/CloudWatchIntegrationTest.java | 10 +++--- 13 files changed, 26 insertions(+), 57 deletions(-) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java index 102e1843d6e..122e38d730e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java @@ -20,9 +20,7 @@ import java.util.stream.Collectors; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.OperationModel; -import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; @@ -63,9 +61,6 @@ public static CodeBlock create(OperationModel operationModel, IntermediateModel SdkInternalExecutionAttribute.class, RequestCompression.class, encodings.stream().collect(Collectors.joining("\", \"", "\"", "\"")), operationModel.hasStreamingInput())) - .add(CodeBlock.of(".putExecutionAttribute($T.REQUEST_COMPRESSION_CONFIGURATION," - + 
"clientConfiguration.option($T.REQUEST_COMPRESSION_CONFIGURATION))", - SdkExecutionAttribute.class, SdkClientOption.class)) .build(); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java index e3e0accafef..ae6973fafab 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java @@ -37,7 +37,6 @@ import software.amazon.awssdk.core.client.handler.AttachHttpMetadataResponseHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; @@ -732,8 +731,6 @@ public CompletableFuture operationWithR .withMetricCollector(apiCallMetricCollector) .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, - clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java index 227de830803..81eb8e1aba4 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java @@ -40,7 +40,6 @@ import software.amazon.awssdk.core.client.handler.AttachHttpMetadataResponseHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; @@ -810,8 +809,6 @@ public CompletableFuture operationWithR .withMetricCollector(apiCallMetricCollector) .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, - clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java index 4cb9f1a16e5..a2a8905fe12 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java @@ -18,7 +18,6 @@ import 
software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; @@ -458,8 +457,6 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( .withMetricCollector(apiCallMetricCollector) .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, - clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java index b4debbf3eb5..b0ca9683c0c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java @@ -26,7 +26,6 @@ import software.amazon.awssdk.core.client.handler.AsyncClientHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import 
software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; @@ -544,8 +543,6 @@ public CompletableFuture operationWithR .withMetricCollector(apiCallMetricCollector) .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, - clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java index 52d7dc7863a..0ca5d783789 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java @@ -17,7 +17,6 @@ import software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; @@ -469,8 +468,6 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( .withMetricCollector(apiCallMetricCollector) .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - 
.putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, - clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java index 026c38a867c..959bfd8618b 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java @@ -32,7 +32,6 @@ import software.amazon.awssdk.core.client.handler.AsyncClientHandler; import software.amazon.awssdk.core.client.handler.ClientExecutionParams; import software.amazon.awssdk.core.http.HttpResponseHandler; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; @@ -567,8 +566,6 @@ public CompletableFuture operationWithR .withMetricCollector(apiCallMetricCollector) .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, - clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java index c96b3442c34..d52550654b1 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java @@ -18,7 +18,6 @@ import software.amazon.awssdk.core.client.handler.SyncClientHandler; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.http.HttpResponseHandler; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; @@ -406,8 +405,6 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( .withInput(operationWithRequestCompressionRequest) .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .putExecutionAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, - clientConfiguration.option(SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION)) .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java index 8dd6c86b8dc..4abbb390a60 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java +++ 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkExecutionAttribute.java @@ -19,7 +19,6 @@ import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.ClientType; -import software.amazon.awssdk.core.RequestCompressionConfiguration; import software.amazon.awssdk.core.ServiceConfiguration; import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.ChecksumSpecs; @@ -110,13 +109,6 @@ public class SdkExecutionAttribute { public static final ExecutionAttribute HTTP_RESPONSE_CHECKSUM_VALIDATION = new ExecutionAttribute<>( "HttpResponseChecksumValidation"); - /** - * The {@link RequestCompressionConfiguration}, which includes options to enable/disable request compression and set the - * minimum compression threshold. - */ - public static final ExecutionAttribute REQUEST_COMPRESSION_CONFIGURATION = - new ExecutionAttribute<>("RequestCompressionConfiguration"); - protected SdkExecutionAttribute() { } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java index 0a7e7338ed3..5f00eb4cfc7 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonAsyncHttpClient.java @@ -172,7 +172,7 @@ public CompletableFuture execute( .then(ApplyUserAgentStage::new) .then(MergeCustomHeadersStage::new) .then(MergeCustomQueryParamsStage::new) - .then(CompressRequestStage::new) + .then(() -> new CompressRequestStage(httpClientDependencies)) .then(() -> new HttpChecksumStage(ClientType.ASYNC)) .then(MakeRequestImmutableStage::new) .then(RequestPipelineBuilder diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java index d465f7636f1..aed81c4c0ae 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/AmazonSyncHttpClient.java @@ -173,7 +173,7 @@ public OutputT execute(HttpResponseHandler> response .then(ApplyUserAgentStage::new) .then(MergeCustomHeadersStage::new) .then(MergeCustomQueryParamsStage::new) - .then(CompressRequestStage::new) + .then(() -> new CompressRequestStage(httpClientDependencies)) .then(() -> new HttpChecksumStage(ClientType.SYNC)) .then(MakeRequestImmutableStage::new) // End of mutating request diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java index 9df24fd004a..859f8394ac9 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.core.internal.http.pipeline.stages; +import static software.amazon.awssdk.core.client.config.SdkClientOption.REQUEST_COMPRESSION_CONFIGURATION; + import java.io.IOException; import java.io.InputStream; import java.io.UncheckedIOException; @@ -27,10 +29,10 @@ import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.internal.compression.Compressor; import 
software.amazon.awssdk.core.internal.compression.CompressorType; +import software.amazon.awssdk.core.internal.http.HttpClientDependencies; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.MutableRequestToRequestPipeline; import software.amazon.awssdk.http.ContentStreamProvider; @@ -44,6 +46,11 @@ public class CompressRequestStage implements MutableRequestToRequestPipeline { private static final int DEFAULT_MIN_COMPRESSION_SIZE = 10_240; private static final int MIN_COMPRESSION_SIZE_LIMIT = 10_485_760; + private final RequestCompressionConfiguration compressionConfig; + + public CompressRequestStage(HttpClientDependencies dependencies) { + compressionConfig = dependencies.clientConfiguration().option(REQUEST_COMPRESSION_CONFIGURATION); + } @Override public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, RequestExecutionContext context) @@ -67,7 +74,7 @@ public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, Requ return input; } - private static boolean shouldCompress(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { + private boolean shouldCompress(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { if (context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION) == null) { return false; } @@ -86,7 +93,7 @@ private static boolean shouldCompress(SdkHttpFullRequest.Builder input, RequestE return isRequestSizeWithinThreshold(input, context); } - private static boolean isStreaming(RequestExecutionContext context) { + private boolean isStreaming(RequestExecutionContext context) { return context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION).isStreaming(); } @@ -96,7 +103,7 @@ private void compressEntirePayload(SdkHttpFullRequest.Builder input, Compressor input.contentStreamProvider(compressedStreamProvider); } - private static void 
updateContentEncodingHeader(SdkHttpFullRequest.Builder input, + private void updateContentEncodingHeader(SdkHttpFullRequest.Builder input, Compressor compressor) { if (input.firstMatchingHeader("Content-encoding").isPresent()) { input.appendHeader("Content-encoding", compressor.compressorType()); @@ -105,7 +112,7 @@ private static void updateContentEncodingHeader(SdkHttpFullRequest.Builder input } } - private static void updateContentLengthHeader(SdkHttpFullRequest.Builder input) { + private void updateContentLengthHeader(SdkHttpFullRequest.Builder input) { InputStream inputStream = input.contentStreamProvider().newStream(); try { byte[] bytes = IoUtils.toByteArray(inputStream); @@ -116,7 +123,7 @@ private static void updateContentLengthHeader(SdkHttpFullRequest.Builder input) } } - private static Compressor resolveCompressorType(ExecutionAttributes executionAttributes) { + private Compressor resolveCompressorType(ExecutionAttributes executionAttributes) { List encodings = executionAttributes.getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION).getEncodings(); @@ -129,7 +136,7 @@ private static Compressor resolveCompressorType(ExecutionAttributes executionAtt return null; } - private static boolean resolveRequestCompressionEnabled(RequestExecutionContext context) { + private boolean resolveRequestCompressionEnabled(RequestExecutionContext context) { Optional requestCompressionEnabledRequestLevel = context.originalRequest().overrideConfiguration() @@ -139,9 +146,7 @@ private static boolean resolveRequestCompressionEnabled(RequestExecutionContext return requestCompressionEnabledRequestLevel.get(); } - Boolean isEnabled = context.executionAttributes() - .getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) - .requestCompressionEnabled(); + Boolean isEnabled = compressionConfig.requestCompressionEnabled(); if (isEnabled != null) { return isEnabled; } @@ -149,14 +154,14 @@ private static boolean 
resolveRequestCompressionEnabled(RequestExecutionContext return true; } - private static boolean isRequestSizeWithinThreshold(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { + private boolean isRequestSizeWithinThreshold(SdkHttpFullRequest.Builder input, RequestExecutionContext context) { int minimumCompressionThreshold = resolveMinCompressionSize(context); validateMinCompressionSizeInput(minimumCompressionThreshold); int requestSize = SdkBytes.fromInputStream(input.contentStreamProvider().newStream()).asByteArray().length; return requestSize >= minimumCompressionThreshold; } - private static int resolveMinCompressionSize(RequestExecutionContext context) { + private int resolveMinCompressionSize(RequestExecutionContext context) { Optional minimumCompressionSizeRequestLevel = context.originalRequest().overrideConfiguration() @@ -166,9 +171,7 @@ private static int resolveMinCompressionSize(RequestExecutionContext context) { return minimumCompressionSizeRequestLevel.get(); } - Integer threshold = context.executionAttributes() - .getAttribute(SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION) - .minimumCompressionThresholdInBytes(); + Integer threshold = compressionConfig.minimumCompressionThresholdInBytes(); if (threshold != null) { return threshold; } @@ -176,7 +179,7 @@ private static int resolveMinCompressionSize(RequestExecutionContext context) { return DEFAULT_MIN_COMPRESSION_SIZE; } - private static void validateMinCompressionSizeInput(int minCompressionSize) { + private void validateMinCompressionSizeInput(int minCompressionSize) { if (!(minCompressionSize >= 0 && minCompressionSize <= MIN_COMPRESSION_SIZE_LIMIT)) { throw SdkClientException.create("The minimum compression size must be non-negative with a maximum value of " + "10485760.", new IllegalArgumentException()); diff --git a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java 
b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java index 957c1c74c08..92339e7f6ce 100644 --- a/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java +++ b/services/cloudwatch/src/it/java/software/amazon/awssdk/services/cloudwatch/CloudWatchIntegrationTest.java @@ -189,10 +189,7 @@ public void put_get_metricdata_list_metric_withRequestCompression_returns_succes .credentialsProvider(getCredentialsProvider()) .region(Region.US_WEST_2) .overrideConfiguration(c -> c.putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - requestCompressionTrait) - .putExecutionAttribute( - SdkExecutionAttribute.REQUEST_COMPRESSION_CONFIGURATION, - compressionConfiguration)) + requestCompressionTrait)) .build(); String measureName = this.getClass().getName() + System.currentTimeMillis(); @@ -203,7 +200,10 @@ public void put_get_metricdata_list_metric_withRequestCompression_returns_succes .unit("Count").value(42.0).build(); requestCompressionClient.putMetricData(PutMetricDataRequest.builder() - .namespace("AWS.EC2").metricData(datum).build()); + .namespace("AWS.EC2") + .metricData(datum) + .overrideConfiguration(c -> c.requestCompressionConfiguration(compressionConfiguration)) + .build()); GetMetricStatisticsResponse result = Waiter.run(() -> requestCompressionClient From 9a89a554563decc6b2bb52749f4ebb5fbd95d5db Mon Sep 17 00:00:00 2001 From: David Ho <70000000+davidh44@users.noreply.github.com> Date: Fri, 11 Aug 2023 11:42:04 -0700 Subject: [PATCH 14/17] Request compression sync streaming (#4222) * Refactor to common class AwsChunkedInputStream * Sync streaming compression * Sync streaming compression functional tests * Sync streaming compression integ tests * Fix integ test * Add unit tests --- .../traits/RequestCompressionTrait.java | 10 +- .../AwsSignedChunkedEncodingInputStream.java | 1 - .../pipeline/stages/CompressRequestStage.java | 20 ++- 
.../io/AwsChunkedEncodingInputStream.java | 90 ++-------- .../internal/io/AwsChunkedInputStream.java | 90 ++++++++++ .../io/AwsCompressionInputStream.java | 170 ++++++++++++++++++ ...uffer.java => UnderlyingStreamBuffer.java} | 6 +- .../CompressionContentStreamProvider.java | 55 ++++++ .../io/AwsCompressionInputStreamTest.java | 93 ++++++++++ services/mediastoredata/pom.xml | 6 + .../MediaStoreDataIntegrationTestBase.java | 155 ++++++++++++++++ ...stCompressionStreamingIntegrationTest.java | 168 +++++++++++++++++ ...ransferEncodingChunkedIntegrationTest.java | 122 +------------ .../src/it/resources/log4j2.properties | 38 ++++ .../customresponsemetadata/service-2.json | 24 +++ .../services/RequestCompressionTest.java | 93 ++++++++++ 16 files changed, 930 insertions(+), 211 deletions(-) create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java rename core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/{DecodedStreamBuffer.java => UnderlyingStreamBuffer.java} (93%) create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java create mode 100644 services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java create mode 100644 services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java create mode 100644 services/mediastoredata/src/it/resources/log4j2.properties diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java 
b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java index 122e38d730e..5168b2e36ed 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/traits/RequestCompressionTrait.java @@ -42,13 +42,9 @@ public static CodeBlock create(OperationModel operationModel, IntermediateModel return CodeBlock.of(""); } - // TODO : remove once request compression for streaming operations is supported - if (operationModel.isStreaming()) { - throw new IllegalStateException("Request compression for streaming operations is not yet supported in the AWS SDK " - + "for Java."); - } - - // TODO : remove once S3 checksum interceptors are moved to occur after CompressRequestStage + // TODO : remove once: + // 1) S3 checksum interceptors are moved to occur after CompressRequestStage + // 2) Transfer-Encoding:chunked is supported in S3 if (model.getMetadata().getServiceName().equals("S3")) { throw new IllegalStateException("Request compression for S3 is not yet supported in the AWS SDK for Java."); } diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java index 636fad74f9f..3174eb7c6ca 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/signer/internal/chunkedencoding/AwsSignedChunkedEncodingInputStream.java @@ -40,7 +40,6 @@ @SdkInternalApi public final class AwsSignedChunkedEncodingInputStream extends AwsChunkedEncodingInputStream { - private static final String CRLF = "\r\n"; private static final String CHUNK_SIGNATURE_HEADER = ";chunk-signature="; private static final 
String CHECKSUM_SIGNATURE_HEADER = "x-amz-trailer-signature:"; private String previousChunkSignature; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java index 859f8394ac9..1eadb88d32d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java @@ -35,6 +35,7 @@ import software.amazon.awssdk.core.internal.http.HttpClientDependencies; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.MutableRequestToRequestPipeline; +import software.amazon.awssdk.core.internal.sync.CompressionContentStreamProvider; import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.utils.IoUtils; @@ -67,10 +68,21 @@ public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, Requ compressEntirePayload(input, compressor); updateContentEncodingHeader(input, compressor); updateContentLengthHeader(input); + return input; + } + + if (!isTransferEncodingChunked(input)) { + return input; } - // TODO : streaming - sync & async + if (context.requestProvider() == null) { + // sync streaming + input.contentStreamProvider(new CompressionContentStreamProvider(input.contentStreamProvider(), compressor)); + } + + // TODO : streaming - async + updateContentEncodingHeader(input, compressor); return input; } @@ -123,6 +135,12 @@ private void updateContentLengthHeader(SdkHttpFullRequest.Builder input) { } } + private boolean isTransferEncodingChunked(SdkHttpFullRequest.Builder input) { + return input.firstMatchingHeader("Transfer-Encoding") + .map(headerValue -> 
headerValue.equals("chunked")) + .orElse(false); + } + private Compressor resolveCompressorType(ExecutionAttributes executionAttributes) { List encodings = executionAttributes.getAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION).getEncodings(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java index f382bd5ced4..ec4870f5e68 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedEncodingInputStream.java @@ -22,8 +22,6 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.internal.chunked.AwsChunkedEncodingConfig; -import software.amazon.awssdk.core.io.SdkInputStream; -import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Validate; /** @@ -37,37 +35,18 @@ * the wrapped stream. 
*/ @SdkInternalApi -public abstract class AwsChunkedEncodingInputStream extends SdkInputStream { +public abstract class AwsChunkedEncodingInputStream extends AwsChunkedInputStream { - public static final int DEFAULT_CHUNK_SIZE = 128 * 1024; - protected static final int SKIP_BUFFER_SIZE = 256 * 1024; protected static final String CRLF = "\r\n"; protected static final byte[] FINAL_CHUNK = new byte[0]; protected static final String HEADER_COLON_SEPARATOR = ":"; - private static final Logger log = Logger.loggerFor(AwsChunkedEncodingInputStream.class); protected byte[] calculatedChecksum = null; protected final String checksumHeaderForTrailer; protected boolean isTrailingTerminated = true; - private InputStream is = null; private final int chunkSize; private final int maxBufferSize; private final SdkChecksum sdkChecksum; private boolean isLastTrailingCrlf; - /** - * Iterator on the current chunk. - */ - private ChunkContentIterator currentChunkIterator; - - /** - * Iterator on the buffer of the decoded stream, - * Null if the wrapped stream is marksupported, - * otherwise it will be initialized when this wrapper is marked. - */ - private DecodedStreamBuffer decodedStreamBuffer; - - private boolean isAtStart = true; - private boolean isTerminating = false; - /** * Creates a chunked encoding input stream initialized with the originating stream. 
The configuration allows @@ -89,10 +68,10 @@ protected AwsChunkedEncodingInputStream(InputStream in, AwsChunkedEncodingInputStream originalChunkedStream = (AwsChunkedEncodingInputStream) in; providedMaxBufferSize = Math.max(originalChunkedStream.maxBufferSize, providedMaxBufferSize); is = originalChunkedStream.is; - decodedStreamBuffer = originalChunkedStream.decodedStreamBuffer; + underlyingStreamBuffer = originalChunkedStream.underlyingStreamBuffer; } else { is = in; - decodedStreamBuffer = null; + underlyingStreamBuffer = null; } this.chunkSize = awsChunkedEncodingConfig.chunkSize(); this.maxBufferSize = providedMaxBufferSize; @@ -153,19 +132,6 @@ public T checksumHeaderForTrailer(String checksumHeaderForTrailer) { } - @Override - public int read() throws IOException { - byte[] tmp = new byte[1]; - int count = read(tmp, 0, 1); - if (count > 0) { - log.debug(() -> "One byte read from the stream."); - int unsignedByte = (int) tmp[0] & 0xFF; - return unsignedByte; - } else { - return count; - } - } - @Override public int read(byte[] b, int off, int len) throws IOException { abortIfNeeded(); @@ -211,32 +177,6 @@ private boolean setUpTrailingChunks() { return true; } - @Override - public long skip(long n) throws IOException { - if (n <= 0) { - return 0; - } - long remaining = n; - int toskip = (int) Math.min(SKIP_BUFFER_SIZE, n); - byte[] temp = new byte[toskip]; - while (remaining > 0) { - int count = read(temp, 0, toskip); - if (count < 0) { - break; - } - remaining -= count; - } - return n - remaining; - } - - /** - * @see java.io.InputStream#markSupported() - */ - @Override - public boolean markSupported() { - return true; - } - /** * The readlimit parameter is ignored. 
*/ @@ -256,7 +196,7 @@ public void mark(int readlimit) { } else { log.debug(() -> "AwsChunkedEncodingInputStream marked at the start of the stream " + "(initializing the buffer since the wrapped stream is not mark-supported)."); - decodedStreamBuffer = new DecodedStreamBuffer(maxBufferSize); + underlyingStreamBuffer = new UnderlyingStreamBuffer(maxBufferSize); } } @@ -280,8 +220,8 @@ public void reset() throws IOException { is.reset(); } else { log.debug(() -> "AwsChunkedEncodingInputStream reset (will use the buffer of the decoded stream)."); - Validate.notNull(decodedStreamBuffer, "Cannot reset the stream because the mark is not set."); - decodedStreamBuffer.startReadBuffer(); + Validate.notNull(underlyingStreamBuffer, "Cannot reset the stream because the mark is not set."); + underlyingStreamBuffer.startReadBuffer(); } isAtStart = true; isTerminating = false; @@ -298,14 +238,14 @@ private boolean setUpNextChunk() throws IOException { int chunkSizeInBytes = 0; while (chunkSizeInBytes < chunkSize) { /** Read from the buffer of the decoded stream */ - if (null != decodedStreamBuffer && decodedStreamBuffer.hasNext()) { - chunkData[chunkSizeInBytes++] = decodedStreamBuffer.next(); + if (null != underlyingStreamBuffer && underlyingStreamBuffer.hasNext()) { + chunkData[chunkSizeInBytes++] = underlyingStreamBuffer.next(); } else { /** Read from the wrapped stream */ int bytesToRead = chunkSize - chunkSizeInBytes; int count = is.read(chunkData, chunkSizeInBytes, bytesToRead); if (count != -1) { - if (null != decodedStreamBuffer) { - decodedStreamBuffer.buffer(chunkData, chunkSizeInBytes, count); + if (null != underlyingStreamBuffer) { + underlyingStreamBuffer.buffer(chunkData, chunkSizeInBytes, count); } chunkSizeInBytes += count; } else { @@ -333,13 +273,6 @@ private boolean setUpNextChunk() throws IOException { } } - - @Override - protected InputStream getWrappedInputStream() { - return is; - } - - /** * The final chunk. 
* @@ -361,5 +294,4 @@ protected InputStream getWrappedInputStream() { * @return ChecksumChunkHeader in bytes based on the Header name field. */ protected abstract byte[] createChecksumChunkHeader(); - -} \ No newline at end of file +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java new file mode 100644 index 00000000000..11beb216f16 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsChunkedInputStream.java @@ -0,0 +1,90 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.io; + +import java.io.IOException; +import java.io.InputStream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.io.SdkInputStream; +import software.amazon.awssdk.utils.Logger; + +/** + * A wrapper of InputStream that implements streaming in chunks. + */ +@SdkInternalApi +public abstract class AwsChunkedInputStream extends SdkInputStream { + public static final int DEFAULT_CHUNK_SIZE = 128 * 1024; + protected static final int SKIP_BUFFER_SIZE = 256 * 1024; + protected static final Logger log = Logger.loggerFor(AwsChunkedInputStream.class); + protected InputStream is; + /** + * Iterator on the current chunk. 
+ */ + protected ChunkContentIterator currentChunkIterator; + + /** + * Iterator on the buffer of the underlying stream, + * Null if the wrapped stream is marksupported, + * otherwise it will be initialized when this wrapper is marked. + */ + protected UnderlyingStreamBuffer underlyingStreamBuffer; + protected boolean isAtStart = true; + protected boolean isTerminating = false; + + @Override + public int read() throws IOException { + byte[] tmp = new byte[1]; + int count = read(tmp, 0, 1); + if (count > 0) { + log.debug(() -> "One byte read from the stream."); + int unsignedByte = (int) tmp[0] & 0xFF; + return unsignedByte; + } else { + return count; + } + } + + @Override + public long skip(long n) throws IOException { + if (n <= 0) { + return 0; + } + long remaining = n; + int toskip = (int) Math.min(SKIP_BUFFER_SIZE, n); + byte[] temp = new byte[toskip]; + while (remaining > 0) { + int count = read(temp, 0, toskip); + if (count < 0) { + break; + } + remaining -= count; + } + return n - remaining; + } + + /** + * @see InputStream#markSupported() + */ + @Override + public boolean markSupported() { + return true; + } + + @Override + protected InputStream getWrappedInputStream() { + return is; + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java new file mode 100644 index 00000000000..93642bad8c4 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStream.java @@ -0,0 +1,170 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.io; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.utils.Validate; + +/** + * A wrapper class of InputStream that implements compression in chunks. + */ +@SdkInternalApi +public final class AwsCompressionInputStream extends AwsChunkedInputStream { + private final Compressor compressor; + + private AwsCompressionInputStream(InputStream in, Compressor compressor) { + this.compressor = compressor; + if (in instanceof AwsCompressionInputStream) { + // This could happen when the request is retried. + AwsCompressionInputStream originalCompressionStream = (AwsCompressionInputStream) in; + this.is = originalCompressionStream.is; + this.underlyingStreamBuffer = originalCompressionStream.underlyingStreamBuffer; + } else { + this.is = in; + this.underlyingStreamBuffer = null; + } + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + abortIfNeeded(); + Validate.notNull(b, "buff"); + if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return 0; + } + + if (currentChunkIterator == null || !currentChunkIterator.hasNext()) { + if (isTerminating) { + return -1; + } + isTerminating = setUpNextChunk(); + } + + int count = currentChunkIterator.read(b, off, len); + if (count > 0) { + isAtStart = false; + log.trace(() -> count + " byte read from the stream."); + } + return count; + } + + private boolean setUpNextChunk() throws IOException { + byte[] 
chunkData = new byte[DEFAULT_CHUNK_SIZE]; + int chunkSizeInBytes = 0; + while (chunkSizeInBytes < DEFAULT_CHUNK_SIZE) { + /** Read from the buffer of the uncompressed stream */ + if (underlyingStreamBuffer != null && underlyingStreamBuffer.hasNext()) { + chunkData[chunkSizeInBytes++] = underlyingStreamBuffer.next(); + } else { /** Read from the wrapped stream */ + int bytesToRead = DEFAULT_CHUNK_SIZE - chunkSizeInBytes; + int count = is.read(chunkData, chunkSizeInBytes, bytesToRead); + if (count != -1) { + if (underlyingStreamBuffer != null) { + underlyingStreamBuffer.buffer(chunkData, chunkSizeInBytes, count); + } + chunkSizeInBytes += count; + } else { + break; + } + } + } + if (chunkSizeInBytes == 0) { + return true; + } + + if (chunkSizeInBytes < chunkData.length) { + chunkData = Arrays.copyOf(chunkData, chunkSizeInBytes); + } + // Compress the chunk + byte[] compressedChunkData = compressor.compress(chunkData); + currentChunkIterator = new ChunkContentIterator(compressedChunkData); + return false; + } + + /** + * The readlimit parameter is ignored. + */ + @Override + public void mark(int readlimit) { + abortIfNeeded(); + if (!isAtStart) { + throw new UnsupportedOperationException("Compression stream only supports mark() at the start of the stream."); + } + if (is.markSupported()) { + log.debug(() -> "AwsCompressionInputStream marked at the start of the stream " + + "(will directly mark the wrapped stream since it's mark-supported)."); + is.mark(readlimit); + } else { + log.debug(() -> "AwsCompressionInputStream marked at the start of the stream " + + "(initializing the buffer since the wrapped stream is not mark-supported)."); + underlyingStreamBuffer = new UnderlyingStreamBuffer(SKIP_BUFFER_SIZE); + } + } + + /** + * Reset the stream, either by resetting the wrapped stream or using the + * buffer created by this class. 
+ */ + @Override + public void reset() throws IOException { + abortIfNeeded(); + // Clear up any encoded data + currentChunkIterator = null; + // Reset the wrapped stream if it is mark-supported, + // otherwise use our buffered data. + if (is.markSupported()) { + log.debug(() -> "AwsCompressionInputStream reset " + + "(will reset the wrapped stream because it is mark-supported)."); + is.reset(); + } else { + log.debug(() -> "AwsCompressionInputStream reset (will use the buffer of the decoded stream)."); + Validate.notNull(underlyingStreamBuffer, "Cannot reset the stream because the mark is not set."); + underlyingStreamBuffer.startReadBuffer(); + } + isAtStart = true; + isTerminating = false; + } + + public static final class Builder { + InputStream inputStream; + Compressor compressor; + + public AwsCompressionInputStream build() { + return new AwsCompressionInputStream( + this.inputStream, this.compressor); + } + + public Builder inputStream(InputStream inputStream) { + this.inputStream = inputStream; + return this; + } + + public Builder compressor(Compressor compressor) { + this.compressor = compressor; + return this; + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/DecodedStreamBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/UnderlyingStreamBuffer.java similarity index 93% rename from core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/DecodedStreamBuffer.java rename to core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/UnderlyingStreamBuffer.java index f6d3c47c0c1..6fc086983fd 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/DecodedStreamBuffer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/UnderlyingStreamBuffer.java @@ -20,8 +20,8 @@ import software.amazon.awssdk.utils.Logger; @SdkInternalApi -class DecodedStreamBuffer { - private static final Logger log = 
Logger.loggerFor(DecodedStreamBuffer.class); +class UnderlyingStreamBuffer { + private static final Logger log = Logger.loggerFor(UnderlyingStreamBuffer.class); private byte[] bufferArray; private int maxBufferSize; @@ -29,7 +29,7 @@ class DecodedStreamBuffer { private int pos = -1; private boolean bufferSizeOverflow; - DecodedStreamBuffer(int maxBufferSize) { + UnderlyingStreamBuffer(int maxBufferSize) { bufferArray = new byte[maxBufferSize]; this.maxBufferSize = maxBufferSize; } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java new file mode 100644 index 00000000000..52a222bc372 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/CompressionContentStreamProvider.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.sync; + +import java.io.InputStream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.io.AwsCompressionInputStream; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.utils.IoUtils; + +/** + * {@link ContentStreamProvider} implementation for compression. 
+ */ +@SdkInternalApi +public class CompressionContentStreamProvider implements ContentStreamProvider { + private final ContentStreamProvider underlyingInputStreamProvider; + private InputStream currentStream; + private final Compressor compressor; + + public CompressionContentStreamProvider(ContentStreamProvider underlyingInputStreamProvider, Compressor compressor) { + this.underlyingInputStreamProvider = underlyingInputStreamProvider; + this.compressor = compressor; + } + + @Override + public InputStream newStream() { + closeCurrentStream(); + currentStream = AwsCompressionInputStream.builder() + .inputStream(underlyingInputStreamProvider.newStream()) + .compressor(compressor) + .build(); + return currentStream; + } + + private void closeCurrentStream() { + if (currentStream != null) { + IoUtils.closeQuietly(currentStream, null); + currentStream = null; + } + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java new file mode 100644 index 00000000000..99359dfcd58 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/io/AwsCompressionInputStreamTest.java @@ -0,0 +1,93 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.io; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static software.amazon.awssdk.core.util.FileUtils.generateRandomAsciiFile; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Random; +import org.junit.BeforeClass; +import org.junit.Test; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; + +public class AwsCompressionInputStreamTest { + private static Compressor compressor; + + @BeforeClass + public static void setup() throws IOException { + compressor = new GzipCompressor(); + } + + @Test + public void nonMarkSupportedInputStream_marksAndResetsCorrectly() throws IOException { + File file = generateRandomAsciiFile(100); + InputStream is = new FileInputStream(file); + assertFalse(is.markSupported()); + + AwsCompressionInputStream compressionInputStream = AwsCompressionInputStream.builder() + .inputStream(is) + .compressor(compressor) + .build(); + + compressionInputStream.mark(100); + compressionInputStream.reset(); + String read1 = readInputStream(compressionInputStream); + compressionInputStream.reset(); + String read2 = readInputStream(compressionInputStream); + assertThat(read1).isEqualTo(read2); + } + + @Test + public void markSupportedInputStream_marksAndResetsCorrectly() throws IOException { + InputStream is = new ByteArrayInputStream(generateRandomBody(100)); + assertTrue(is.markSupported()); + AwsCompressionInputStream compressionInputStream = AwsCompressionInputStream.builder() + .inputStream(is) + .compressor(compressor) + .build(); + compressionInputStream.mark(100); + compressionInputStream.reset(); + String read1 = 
readInputStream(compressionInputStream); + compressionInputStream.reset(); + String read2 = readInputStream(compressionInputStream); + assertThat(read1).isEqualTo(read2); + } + + private byte[] generateRandomBody(int size) { + byte[] randomData = new byte[size]; + new Random().nextBytes(randomData); + return randomData; + } + + private String readInputStream(InputStream is) throws IOException { + byte[] buffer = new byte[512]; + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + int bytesRead; + while ((bytesRead = is.read(buffer)) != -1) { + byteArrayOutputStream.write(buffer, 0, bytesRead); + } + return byteArrayOutputStream.toString(); + } +} diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index 32d52843ee6..3e5e81a0a21 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -74,5 +74,11 @@ commons-lang3 test + + software.amazon.awssdk + mediastore + ${awsjavasdk.version} + test + diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java new file mode 100644 index 00000000000..3a0e7006ef8 --- /dev/null +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java @@ -0,0 +1,155 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.mediastoredata; + +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import io.reactivex.Flowable; +import java.io.ByteArrayInputStream; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.ContentStreamProvider; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.services.mediastore.MediaStoreClient; +import software.amazon.awssdk.services.mediastore.model.Container; +import software.amazon.awssdk.services.mediastore.model.ContainerStatus; +import software.amazon.awssdk.services.mediastore.model.DescribeContainerResponse; +import software.amazon.awssdk.testutils.Waiter; +import software.amazon.awssdk.testutils.service.AwsIntegrationTestBase; + +/** + * Base class for MediaStoreData integration tests. Used for Transfer-Encoding and Request Compression testing. 
+ */ +public class MediaStoreDataIntegrationTestBase extends AwsIntegrationTestBase { + protected static final String CONTAINER_NAME = "java-sdk-test-mediastoredata-" + Instant.now().toEpochMilli(); + protected static AwsCredentialsProvider credentialsProvider; + protected static MediaStoreClient mediaStoreClient; + protected static URI uri; + + @BeforeAll + public static void init() { + credentialsProvider = getCredentialsProvider(); + mediaStoreClient = MediaStoreClient.builder() + .credentialsProvider(credentialsProvider) + .httpClient(ApacheHttpClient.builder().build()) + .build(); + uri = URI.create(createContainer().endpoint()); + } + + @AfterEach + public void reset() { + CaptureTransferEncodingHeaderInterceptor.reset(); + } + + private static Container createContainer() { + mediaStoreClient.createContainer(r -> r.containerName(CONTAINER_NAME)); + DescribeContainerResponse response = waitContainerToBeActive(); + return response.container(); + } + + private static DescribeContainerResponse waitContainerToBeActive() { + return Waiter.run(() -> mediaStoreClient.describeContainer(r -> r.containerName(CONTAINER_NAME))) + .until(r -> r.container().status() == ContainerStatus.ACTIVE) + .orFailAfter(Duration.ofMinutes(3)); + } + + protected AsyncRequestBody customAsyncRequestBodyWithoutContentLength() { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromBytes("Random text".getBytes())) + .subscribe(s); + } + }; + } + + protected static class CaptureTransferEncodingHeaderInterceptor implements ExecutionInterceptor { + public static boolean isChunked; + + public static void reset() { + isChunked = false; + } + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + isChunked = 
context.httpRequest().matchingHeaders("Transfer-Encoding").contains("chunked"); + } + } + + protected static class TestContentProvider implements ContentStreamProvider { + private final byte[] content; + private final List createdStreams = new ArrayList<>(); + private CloseTrackingInputStream currentStream; + + protected TestContentProvider(byte[] content) { + this.content = content.clone(); + } + + @Override + public InputStream newStream() { + if (currentStream != null) { + invokeSafely(currentStream::close); + } + currentStream = new CloseTrackingInputStream(new ByteArrayInputStream(content)); + createdStreams.add(currentStream); + return currentStream; + } + + List getCreatedStreams() { + return Collections.unmodifiableList(createdStreams); + } + } + + protected static class CloseTrackingInputStream extends FilterInputStream { + private boolean isClosed = false; + + CloseTrackingInputStream(InputStream in) { + super(in); + } + + @Override + public void close() throws IOException { + super.close(); + isClosed = true; + } + + boolean isClosed() { + return isClosed; + } + } +} diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java new file mode 100644 index 00000000000..9530f2319b3 --- /dev/null +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java @@ -0,0 +1,168 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.mediastoredata; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.RequestCompressionConfiguration; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.services.mediastoredata.model.DeleteObjectRequest; +import software.amazon.awssdk.services.mediastoredata.model.GetObjectRequest; +import software.amazon.awssdk.services.mediastoredata.model.GetObjectResponse; +import software.amazon.awssdk.services.mediastoredata.model.ObjectNotFoundException; +import software.amazon.awssdk.services.mediastoredata.model.PutObjectRequest; +import software.amazon.awssdk.testutils.Waiter; + +/** + * Integration test to verify Request Compression 
functionalities for streaming operations. Do not delete. + */ +public class RequestCompressionStreamingIntegrationTest extends MediaStoreDataIntegrationTestBase { + private static final String UNCOMPRESSED_BODY = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + private static String compressedBody; + private static MediaStoreDataClient syncClient; + private static MediaStoreDataAsyncClient asyncClient; + private static PutObjectRequest putObjectRequest; + private static DeleteObjectRequest deleteObjectRequest; + private static GetObjectRequest getObjectRequest; + + @BeforeAll + public static void setup() { + RequestCompressionConfiguration compressionConfiguration = + RequestCompressionConfiguration.builder() + .minimumCompressionThresholdInBytes(1) + .requestCompressionEnabled(true) + .build(); + + RequestCompression requestCompressionTrait = RequestCompression.builder() + .encodings("gzip") + .isStreaming(true) + .build(); + + syncClient = MediaStoreDataClient.builder() + .endpointOverride(uri) + .credentialsProvider(credentialsProvider) + .httpClient(ApacheHttpClient.builder().build()) + .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureTransferEncodingHeaderInterceptor()) + .addExecutionInterceptor(new CaptureContentEncodingHeaderInterceptor()) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + requestCompressionTrait) + .requestCompressionConfiguration(compressionConfiguration)) + .build(); + + asyncClient = MediaStoreDataAsyncClient.builder() + .endpointOverride(uri) + .credentialsProvider(getCredentialsProvider()) + .httpClient(NettyNioAsyncHttpClient.create()) + .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureTransferEncodingHeaderInterceptor()) + .addExecutionInterceptor(new CaptureContentEncodingHeaderInterceptor()) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + requestCompressionTrait) + 
.requestCompressionConfiguration(compressionConfiguration)) + .build(); + + putObjectRequest = PutObjectRequest.builder() + .contentType("application/octet-stream") + .path("/foo") + .overrideConfiguration( + o -> o.requestCompressionConfiguration( + c -> c.requestCompressionEnabled(true))) + .build(); + deleteObjectRequest = DeleteObjectRequest.builder().path("/foo").build(); + getObjectRequest = GetObjectRequest.builder().path("/foo").build(); + + Compressor compressor = new GzipCompressor(); + byte[] compressedBodyBytes = compressor.compress(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)).asByteArray(); + compressedBody = new String(compressedBodyBytes); + } + + @AfterAll + public static void tearDown() { + syncClient.deleteObject(deleteObjectRequest); + Waiter.run(() -> syncClient.describeObject(r -> r.path("/foo"))) + .untilException(ObjectNotFoundException.class) + .orFailAfter(Duration.ofMinutes(1)); + } + + @AfterEach + public void cleanUp() { + CaptureContentEncodingHeaderInterceptor.reset(); + } + + @Test + public void putObject_withRequestCompressionSyncStreaming_compressesPayloadAndSendsCorrectly() throws IOException { + TestContentProvider provider = new TestContentProvider(UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8)); + syncClient.putObject(putObjectRequest, RequestBody.fromContentProvider(provider, "binary/octet-stream")); + + assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); + assertThat(CaptureContentEncodingHeaderInterceptor.isGzip).isTrue(); + + ResponseInputStream response = syncClient.getObject(getObjectRequest); + byte[] buffer = new byte[UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8).length]; + response.read(buffer); + String retrievedContent = new String(buffer); + assertThat(UNCOMPRESSED_BODY).isEqualTo(retrievedContent); + } + + // TODO : uncomment once async streaming compression is implemented + /*@Test + public void nettyClientPutObject_withoutContentLength_sendsSuccessfully() throws IOException { + 
AsyncRequestBody asyncRequestBody = customAsyncRequestBodyWithoutContentLength(); + asyncClient.putObject(putObjectRequest, asyncRequestBody).join(); + + assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); + assertThat(CaptureContentEncodingHeaderInterceptor.isGzip).isTrue(); + + // verify stored content is correct + ResponseInputStream response = syncClient.getObject(getObjectRequest); + byte[] buffer = new byte[UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8).length]; + response.read(buffer); + String retrievedContent = new String(buffer); + assertThat(UNCOMPRESSED_BODY).isEqualTo(retrievedContent); + assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); + }*/ + + private static class CaptureContentEncodingHeaderInterceptor implements ExecutionInterceptor { + public static boolean isGzip; + + public static void reset() { + isGzip = false; + } + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + isGzip = context.httpRequest().matchingHeaders("Content-Encoding").contains("gzip"); + } + } +} diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java index acab0a8d672..80fb67dc6fa 100644 --- a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java @@ -16,70 +16,34 @@ package software.amazon.awssdk.services.mediastoredata; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; -import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; -import io.reactivex.Flowable; -import java.io.ByteArrayInputStream; -import 
java.io.FilterInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.URI; -import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; import org.apache.commons.lang3.RandomStringUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.reactivestreams.Subscriber; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.apache.ApacheHttpClient; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; -import software.amazon.awssdk.services.mediastore.MediaStoreClient; -import software.amazon.awssdk.services.mediastore.model.Container; -import software.amazon.awssdk.services.mediastore.model.ContainerStatus; -import software.amazon.awssdk.services.mediastore.model.DescribeContainerResponse; import software.amazon.awssdk.services.mediastoredata.model.DeleteObjectRequest; import software.amazon.awssdk.services.mediastoredata.model.ObjectNotFoundException; import software.amazon.awssdk.services.mediastoredata.model.PutObjectRequest; import software.amazon.awssdk.testutils.Waiter; -import software.amazon.awssdk.testutils.service.AwsIntegrationTestBase; /** * Integration test to verify Transfer-Encoding:chunked functionalities for all supported HTTP clients. Do not delete. 
*/ -public class TransferEncodingChunkedIntegrationTest extends AwsIntegrationTestBase { - private static final String CONTAINER_NAME = "java-sdk-test-" + Instant.now().toEpochMilli(); - private static MediaStoreClient mediaStoreClient; +public class TransferEncodingChunkedIntegrationTest extends MediaStoreDataIntegrationTestBase { private static MediaStoreDataClient syncClientWithApache; private static MediaStoreDataClient syncClientWithUrlConnection; private static MediaStoreDataAsyncClient asyncClientWithNetty; - private static AwsCredentialsProvider credentialsProvider; - private static Container container; private static PutObjectRequest putObjectRequest; private static DeleteObjectRequest deleteObjectRequest; @BeforeAll public static void setup() { - credentialsProvider = getCredentialsProvider(); - mediaStoreClient = MediaStoreClient.builder() - .credentialsProvider(credentialsProvider) - .httpClient(ApacheHttpClient.builder().build()) - .build(); - container = createContainer(); - URI uri = URI.create(container.endpoint()); - syncClientWithApache = MediaStoreDataClient.builder() .endpointOverride(uri) .credentialsProvider(credentialsProvider) @@ -117,7 +81,7 @@ public static void tearDown() { Waiter.run(() -> syncClientWithApache.describeObject(r -> r.path("/foo"))) .untilException(ObjectNotFoundException.class) .orFailAfter(Duration.ofMinutes(1)); - CaptureTransferEncodingHeaderInterceptor.reset(); + mediaStoreClient.deleteContainer(r -> r.containerName(CONTAINER_NAME)); } @Test @@ -139,86 +103,4 @@ public void nettyClientPutObject_withoutContentLength_sendsSuccessfully() { asyncClientWithNetty.putObject(putObjectRequest, customAsyncRequestBodyWithoutContentLength()).join(); assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); } - - private static Container createContainer() { - mediaStoreClient.createContainer(r -> r.containerName(CONTAINER_NAME)); - DescribeContainerResponse response = waitContainerToBeActive(); - return 
response.container(); - } - - private static DescribeContainerResponse waitContainerToBeActive() { - return Waiter.run(() -> mediaStoreClient.describeContainer(r -> r.containerName(CONTAINER_NAME))) - .until(r -> ContainerStatus.ACTIVE.equals(r.container().status())) - .orFailAfter(Duration.ofMinutes(3)); - } - - private static class CaptureTransferEncodingHeaderInterceptor implements ExecutionInterceptor { - private static boolean isChunked; - - public static void reset() { - isChunked = false; - } - - @Override - public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { - isChunked = context.httpRequest().matchingHeaders("Transfer-Encoding").contains("chunked"); - } - } - - private AsyncRequestBody customAsyncRequestBodyWithoutContentLength() { - return new AsyncRequestBody() { - @Override - public Optional contentLength() { - return Optional.empty(); - } - - @Override - public void subscribe(Subscriber s) { - Flowable.fromPublisher(AsyncRequestBody.fromBytes("Random text".getBytes())) - .subscribe(s); - } - }; - } - - private static class TestContentProvider implements ContentStreamProvider { - private final byte[] content; - private final List createdStreams = new ArrayList<>(); - private CloseTrackingInputStream currentStream; - - private TestContentProvider(byte[] content) { - this.content = content; - } - - @Override - public InputStream newStream() { - if (currentStream != null) { - invokeSafely(currentStream::close); - } - currentStream = new CloseTrackingInputStream(new ByteArrayInputStream(content)); - createdStreams.add(currentStream); - return currentStream; - } - - List getCreatedStreams() { - return createdStreams; - } - } - - private static class CloseTrackingInputStream extends FilterInputStream { - private boolean isClosed = false; - - CloseTrackingInputStream(InputStream in) { - super(in); - } - - @Override - public void close() throws IOException { - super.close(); - isClosed = true; - } - - 
boolean isClosed() { - return isClosed; - } - } } diff --git a/services/mediastoredata/src/it/resources/log4j2.properties b/services/mediastoredata/src/it/resources/log4j2.properties new file mode 100644 index 00000000000..ea24f17148e --- /dev/null +++ b/services/mediastoredata/src/it/resources/log4j2.properties @@ -0,0 +1,38 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# + +status = warn + +appender.console.type = Console +appender.console.name = ConsoleAppender +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n%throwable + +rootLogger.level = info +rootLogger.appenderRef.stdout.ref = ConsoleAppender + +# Uncomment below to enable more specific logging +# +#logger.sdk.name = software.amazon.awssdk +#logger.sdk.level = debug +# +#logger.request.name = software.amazon.awssdk.request +#logger.request.level = debug +# +#logger.apache.name = org.apache.http.wire +#logger.apache.level = debug +# +#logger.netty.name = io.netty.handler.logging +#logger.netty.level = debug \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json index b1f994fd1d4..8cdb71614e3 100644 --- 
a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/customresponsemetadata/service-2.json @@ -289,6 +289,19 @@ "encodings": ["gzip"] } }, + "PutOperationWithStreamingRequestCompression":{ + "name":"PutOperationWithStreamingRequestCompression", + "http":{ + "method":"PUT", + "requestUri":"/" + }, + "input":{"shape":"RequestCompressionStructureWithStreaming"}, + "output":{"shape":"RequestCompressionStructureWithStreaming"}, + "requestCompression": { + "encodings": ["gzip"] + }, + "authtype":"v4-unsigned-body" + }, "GetOperationWithChecksum":{ "name":"GetOperationWithChecksum", "http":{ @@ -1030,6 +1043,17 @@ } }, "payload":"Body" + }, + "RequestCompressionStructureWithStreaming":{ + "type":"structure", + "members":{ + "Body":{ + "shape":"Body", + "documentation":"
<p>Object data.</p>
", + "streaming":true + } + }, + "payload":"Body" } } } diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java index e66f5f47bd1..29664c5f53f 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java @@ -16,9 +16,16 @@ package software.amazon.awssdk.services; import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; +import java.io.ByteArrayInputStream; +import java.io.FilterInputStream; +import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.time.Duration; +import java.util.ArrayList; +import java.util.List; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.AfterEach; @@ -26,6 +33,9 @@ import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.internal.compression.Compressor; import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.HttpExecuteResponse; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpResponse; @@ -33,6 +43,7 @@ import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithRequestCompressionRequest; +import 
software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithStreamingRequestCompressionRequest; import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; @@ -46,6 +57,7 @@ public class RequestCompressionTest { private ProtocolRestJsonClient syncClient; private ProtocolRestJsonAsyncClient asyncClient; private Compressor compressor; + private RequestBody requestBody; @BeforeEach public void setUp() { @@ -65,6 +77,8 @@ public void setUp() { byte[] compressedBodyBytes = compressor.compress(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)).asByteArray(); compressedLen = compressedBodyBytes.length; compressedBody = new String(compressedBodyBytes); + TestContentProvider provider = new TestContentProvider(UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8)); + requestBody = RequestBody.fromContentProvider(provider, "binary/octet-stream"); } @AfterEach @@ -118,6 +132,24 @@ public void async_nonStreaming_compression_compressesCorrectly() { assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); } + @Test + public void sync_streaming_compression_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + syncClient.putOperationWithStreamingRequestCompression(request, requestBody, ResponseTransformer.toBytes()); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + 
assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + @Test public void sync_nonStreaming_compression_withRetry_compressesCorrectly() { mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); @@ -165,6 +197,25 @@ public void async_nonStreaming_compression_withRetry_compressesCorrectly() { assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); } + @Test + public void sync_streaming_compression_withRetry_compressesCorrectly() { + mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + syncClient.putOperationWithStreamingRequestCompression(request, requestBody, ResponseTransformer.toBytes()); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + private HttpExecuteResponse mockResponse() { return HttpExecuteResponse.builder() .response(SdkHttpResponse.builder().statusCode(200).build()) @@ -176,4 +227,46 @@ private HttpExecuteResponse mockErrorResponse() { .response(SdkHttpResponse.builder().statusCode(500).build()) .build(); } + + private static final class TestContentProvider implements ContentStreamProvider { + private final byte[] 
content; + private final List createdStreams = new ArrayList<>(); + private CloseTrackingInputStream currentStream; + + private TestContentProvider(byte[] content) { + this.content = content; + } + + @Override + public InputStream newStream() { + if (currentStream != null) { + invokeSafely(currentStream::close); + } + currentStream = new CloseTrackingInputStream(new ByteArrayInputStream(content)); + createdStreams.add(currentStream); + return currentStream; + } + + List getCreatedStreams() { + return createdStreams; + } + } + + private static class CloseTrackingInputStream extends FilterInputStream { + private boolean isClosed = false; + + CloseTrackingInputStream(InputStream in) { + super(in); + } + + @Override + public void close() throws IOException { + super.close(); + isClosed = true; + } + + boolean isClosed() { + return isClosed; + } + } } From 4bae174bcc60a8b3b0fa9c8423e860e97ea7d0b3 Mon Sep 17 00:00:00 2001 From: David Ho <70000000+davidh44@users.noreply.github.com> Date: Fri, 11 Aug 2023 15:38:32 -0700 Subject: [PATCH 15/17] Merge from master (#4297) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Amazon WorkSpaces Update: Fixed VolumeEncryptionKey descriptions * Amazon Relational Database Service Update: Adds support for the DBSystemID parameter of CreateDBInstance to RDS Custom for Oracle. * AWS Elemental MediaConvert Update: This release includes improvements to Preserve 444 handling, compatibility of HEVC sources without frame rates, and general improvements to MP4 outputs. * AWS Glue Update: This release adds support for AWS Glue Crawler with Apache Hudi Tables, allowing Crawlers to discover Hudi Tables in S3 and register them in Glue Data Catalog for query engines to query against. * Updated endpoints.json and partitions.json. * Release 2.20.109. Updated CHANGELOG.md, README.md and all pom.xml. 
* Update to next snapshot version: 2.20.110-SNAPSHOT * Amazon Chime SDK Media Pipelines Update: AWS Media Pipeline compositing enhancement and Media Insights Pipeline auto language identification. * Amazon QuickSight Update: This release launches new Snapshot APIs for CSV and PDF exports, adds support for info icon for filters and parameters in Exploration APIs, adds modeled exception to the DeleteAccountCustomization API, and introduces AttributeAggregationFunction's ability to add UNIQUE_VALUE aggregation in tooltips. * AWS Cost Explorer Service Update: This release introduces the new API 'GetSavingsPlanPurchaseRecommendationDetails', which retrieves the details for a Savings Plan recommendation. It also updates the existing API 'GetSavingsPlansPurchaseRecommendation' to include the recommendation detail ID. * AWS Glue Update: Added support for Data Preparation Recipe node in Glue Studio jobs * Amazon Elastic Compute Cloud Update: Add "disabled" enum value to SpotInstanceState. * AWS CloudFormation Update: This release supports filtering by DRIFT_STATUS for existing API ListStackInstances and adds support for a new API ListStackInstanceResourceDrifts. Customers can now view resource drift information from their StackSet management accounts. * AmazonApiGatewayV2 Update: Documentation updates for Amazon API Gateway. * Updated endpoints.json and partitions.json. * Release 2.20.110. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.111-SNAPSHOT * Fixed bug in IAM policy builder where actions were being written instead of resources. (#4223) Also fixed javadoc issue, where create() doesn't exist on the IAM client. * Amazon DynamoDB Update: Documentation updates for DynamoDB * Amazon Relational Database Service Update: This release adds support for monitoring storage optimization progress on the DescribeDBInstances API. 
* AWS Lambda Update: Add Python 3.11 (python3.11) support to AWS Lambda * AWS SecurityHub Update: Add support for CONTAINS and NOT_CONTAINS comparison operators for Automation Rules string filters and map filters * AWS DataSync Update: AWS DataSync now supports Microsoft Azure Blob Storage locations. * Amazon SageMaker Service Update: Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API * Amazon Connect Wisdom Service Update: This release added two new data types: AssistantIntegrationConfiguration, and SessionIntegrationConfiguration to support Wisdom integration with Amazon Connect Chat * AWS Transfer Family Update: This release adds support for SFTP Connectors. * AWSBillingConductor Update: Added support for Auto-Assocate Billing Groups for CreateBillingGroup, UpdateBillingGroup, and ListBillingGroups. * Amazon Elastic Compute Cloud Update: This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes. * AWS Security Token Service Update: API updates for the AWS Security Token Service * EMR Serverless Update: This release adds support for publishing application logs to CloudWatch. * Amazon Connect Customer Profiles Update: Amazon Connect Customer Profiles now supports rule-based resolution to match and merge similar profiles into unified profiles, helping companies deliver faster and more personalized customer service by providing access to relevant customer information for agents and automated experiences. * Release 2.20.111. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.112-SNAPSHOT * Amazon HealthLake Update: Updating the HealthLake service documentation. * Amazon Omics Update: The service is renaming as a part of AWS Health. 
* Amazon Polly Update: Amazon Polly adds 1 new voice - Lisa (nl-BE) * OpenSearch Service Serverless Update: This release adds new collection type VectorSearch. * Amazon Managed Blockchain Query Update: Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs. * AWS Elemental MediaConvert Update: This release includes general updates to user documentation. * Amazon Route 53 Update: Update that corrects the documents for received feedback. * AWS Cloud Control API Update: Updates the documentation for CreateResource. * AWS EntityResolution Update: AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information. * AWS Glue Update: Release Glue Studio Snowflake Connector Node for SDK/CLI * Updated endpoints.json and partitions.json. * Release 2.20.112. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.113-SNAPSHOT * Amazon SageMaker Service Update: Expose ProfilerConfig attribute in SageMaker Search API response. * Amazon Elastic Block Store Update: SDK and documentation updates for Amazon Elastic Block Store API * Auto Scaling Update: This release updates validation for instance types used in the AllowedInstanceTypes and ExcludedInstanceTypes parameters of the InstanceRequirements property of a MixedInstancesPolicy. * Amazon Elastic Compute Cloud Update: SDK and documentation updates for Amazon Elastic Block Store APIs * Amazon Elastic Kubernetes Service Update: Add multiple customer error code to handle customer caused failure when managing EKS node groups * Release 2.20.113. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.114-SNAPSHOT * Cleanup unused imports/members (#4234) * Amazon Simple Queue Service Update: Documentation changes related to SQS APIs. * Release 2.20.114. 
Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.115-SNAPSHOT * Managed Streaming for Kafka Update: Amazon MSK has introduced new versions of ListClusterOperations and DescribeClusterOperation APIs. These v2 APIs provide information and insights into the ongoing operations of both MSK Provisioned and MSK Serverless clusters. * Amazon CloudFront Update: Add a new JavaScript runtime version for CloudFront Functions. * Amazon Pinpoint Update: Added support for sending push notifications using the FCM v1 API with json credentials. Amazon Pinpoint customers can now deliver messages to Android devices using both FCM v1 API and the legacy FCM/GCM API * Amazon Connect Service Update: This release adds support for new number types. * Amazon CloudWatch Application Insights Update: This release enable customer to add/remove/update more than one workload for a component * AWS CloudFormation Update: This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet. * Updated endpoints.json and partitions.json. * Release 2.20.115. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.116-SNAPSHOT * AWS Amplify UI Builder Update: Amplify Studio releases GraphQL support for codegen job action. * Elastic Disaster Recovery Service Update: Add support for in-aws right sizing * AWS CodeStar connections Update: New integration with the Gitlab provider type. * AWS Clean Rooms Service Update: This release introduces custom SQL queries - an expanded set of SQL you can run. This release adds analysis templates, a new resource for storing pre-defined custom SQL queries ahead of time. This release also adds the Custom analysis rule, which lets you approve analysis templates for querying. 
* Amazon Omics Update: Add CreationType filter for ListReadSets * Inspector2 Update: This release adds 1 new API: BatchGetFindingDetails to retrieve enhanced vulnerability intelligence details for findings. * Amazon Relational Database Service Update: This release adds support for Aurora MySQL local write forwarding, which allows for forwarding of write operations from reader DB instances to the writer DB instance. * Amazon Lookout for Equipment Update: This release includes new import resource, model versioning and resource policy features. * Amazon EventBridge Scheduler Update: This release introduces automatic deletion of schedules in EventBridge Scheduler. If configured, EventBridge Scheduler automatically deletes a schedule after the schedule has completed its last invocation. * Amazon Route 53 Update: Amazon Route 53 now supports the Israel (Tel Aviv) Region (il-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. * Auto Scaling Update: You can now configure an instance refresh to set its status to 'failed' when it detects that a specified CloudWatch alarm has gone into the ALARM state. You can also choose to roll back the instance refresh automatically when the alarm threshold is met. * Updated endpoints.json and partitions.json. * Release 2.20.116. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.117-SNAPSHOT * Amazon SageMaker Service Update: Add Stairs TrafficPattern and FlatInvocations to RecommendationJobStoppingConditions * AWS Batch Update: This release adds support for price capacity optimized allocation strategy for Spot Instances. * Amazon CloudWatch Internet Monitor Update: This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event. 
* Amazon Relational Database Service Update: Added support for deleted clusters PiTR. * AWS Database Migration Service Update: Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version. * AWS Elemental MediaLive Update: AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone. * Amazon Polly Update: Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only. * Updated endpoints.json and partitions.json. * Release 2.20.117. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.118-SNAPSHOT * AWS Resilience Hub Update: Drift Detection capability added when applications policy has moved from a meet to breach state. Customers will be able to exclude operational recommendations and receive credit in their resilience score. Customers can now add ARH permissions to an existing or new role. * Amazon Cognito Identity Provider Update: New feature that logs Cognito user pool error messages to CloudWatch logs. * AWS Glue Update: This release includes additional Glue Streaming KAKFA SASL property types. * Amazon SageMaker Service Update: SageMaker Inference Recommender introduces a new API GetScalingConfigurationRecommendation to recommend auto scaling policies based on completed Inference Recommender jobs. * AWS Budgets Update: As part of CAE tagging integration we need to update our budget names regex filter to prevent customers from using "/action/" in their budget names. * Updated endpoints.json and partitions.json. * Release 2.20.118. Updated CHANGELOG.md, README.md and all pom.xml. * Fixed an issue in ChecksumCalculatingAsyncRequestBody where the posit… (#4244) * Fixed an issue in ChecksumCalculatingAsyncRequestBody where the position of the ByteBuffer was not honored. 
* Fix checkstyle * rename methods and variables * Add javadocs * Update to next snapshot version: 2.20.119-SNAPSHOT * Add Expect 100-continue for UploadPartRequest (#4252) * Add Expect 100-continue for UploadPartRequest * Fix typo * Bump crt to 0.24.0 (#4256) * Amazon SageMaker Service Update: Amazon SageMaker now supports running training jobs on p5.48xlarge instance types. * Amazon Elastic Compute Cloud Update: This release adds new parameter isPrimaryIPv6 to allow assigning an IPv6 address as a primary IPv6 address to a network interface which cannot be changed to give equivalent functionality available for network interfaces with primary IPv4 address. * Auto Scaling Update: Documentation changes related to Amazon EC2 Auto Scaling APIs. * AWS Database Migration Service Update: The release makes public API for DMS Schema Conversion feature. * AWS Cloud9 Update: Updated the deprecation date for Amazon Linux. Doc only update. * Release 2.20.119. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.120-SNAPSHOT * AWS Certificate Manager Private Certificate Authority Update: Documentation correction for AWS Private CA * Amazon Connect Service Update: Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile. * Amazon SageMaker Service Update: Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object * AWS DataSync Update: Display cloud storage used capacity at a cluster level. * Amazon EC2 Container Service Update: This is a documentation update to address various tickets. * Updated endpoints.json and partitions.json. * Release 2.20.120. Updated CHANGELOG.md, README.md and all pom.xml. 
* Update to next snapshot version: 2.20.121-SNAPSHOT * Java based S3 Multipart Client (#4254) * Implement multipart upload in Java-based S3 async client (#4052) * Implement multipart upload in Java-based S3 async client Co-authored-by: Matthew Miller * Iterate SdkFields to convert requests (#4177) * Iterate SdkFields to convert requests * Fix flaky test * Rename conversion utils class * Fix null content length in SplittingPublisher (#4173) * Implement multipart copy in Java-based S3 async client (#4189) * Create split method in AsyncRequestBody to return SplittingPublisher (#4188) * Create split method in AsyncRequestBody to return SplittingPublisher * Fix Javadoc and build * Add more tests with ByteArrayAsyncRequestBody (#4214) * Handle null response metadata (#4215) * Handle null response metadata * Fix build * Support streaming with unknown content length (#4226) * Support uploading with unknown content length * Refactoring * Create a configuration class for SdkPublisher#split (#4236) * S3 Multipart API implementation (#4235) * Multipart API fix merge conflicts * getObject(...) throw UnsupportedOperationException * Use user agent for all requests in MultipartS3Client * MultipartS3AsyncClient javadoc + API_NAME private * use `maximumMemoryUsageInBytes` * fix problem with UserAgent, cleanup * move contextParam keys to S3AsyncClientDecorator * javadoc * more javadoc * Use 4x part size as default apiCallBufferSize * Fix test * Guard against re-subscription in SplittingPublisher (#4253) * guard against re-subscription in SplittingPublisher * fix checkstyle * Error msg * Fix a race condition where the third upload part request was sent before the second one (#4260) --------- Co-authored-by: Zoe Wang <33073555+zoewangg@users.noreply.github.com> * Amazon Interactive Video Service RealTime Update: Add QUOTA_EXCEEDED and PUBLISHER_NOT_FOUND to EventErrorCode for stage health events.
* Amazon Kinesis Video Streams Update: This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature. * Amazon Rekognition Update: This release adds code snippets for Amazon Rekognition Custom Labels. * Amazon Kinesis Video Streams Archived Media Update: This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature. * Amazon Detective Update: Updated the email validation regex to be in line with the TLD name specifications. * Updated endpoints.json and partitions.json. * Release 2.20.121. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.122-SNAPSHOT * Update to next snapshot version: 2.20.122-SNAPSHOT (#4275) * Update to next snapshot version: 2.20.122-SNAPSHOT * Revert previous version number * AWS Service Catalog Update: Introduce support for HashiCorp Terraform Cloud in Service Catalog by adding TERRAFORM_CLOUD product type in CreateProduct and CreateProvisioningArtifact API. * AWS Backup Update: This release introduces a new logically air-gapped vault (Preview) in AWS Backup that stores immutable backup copies, which are locked by default and isolated with encryption using AWS owned keys. Logically air-gapped vault (Preview) allows secure recovery of application data across accounts. * Amazon ElastiCache Update: Added support for cluster mode in online migration and test migration API * Updated endpoints.json and partitions.json. * Release 2.20.122. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.123-SNAPSHOT * Add default methods to AwsServiceClientConfiguration.Builder and SdkServiceClientConfiguration.Builder. (#4263) This allows older client versions to still compile with newer runtime versions.
* Expose thresholdSizeInBytes in AWS CRT-based S3 client (#4282) * AWS Global Accelerator Update: Documentation update for dualstack EC2 endpoint support * Amazon FSx Update: For FSx for Lustre, add new data repository task type, RELEASE_DATA_FROM_FILESYSTEM, to release files that have been archived to S3. For FSx for Windows, enable support for configuring and updating SSD IOPS, and for updating storage type. For FSx for OpenZFS, add new deployment type, MULTI_AZ_1. * Amazon Chime SDK Voice Update: Updating CreatePhoneNumberOrder, UpdatePhoneNumber and BatchUpdatePhoneNumbers APIs, adding phone number name * Amazon GuardDuty Update: Added autoEnable ALL to UpdateOrganizationConfiguration and DescribeOrganizationConfiguration APIs. * Amazon SageMaker Service Update: This release adds support for cross account access for SageMaker Model Cards through AWS RAM. * Updated endpoints.json and partitions.json. * Release 2.20.123. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.124-SNAPSHOT * Fix immutableCopyOf bug (#4266) * Set limit when cloning ByteBuffer * Add changelog * Add L-Applin and breader124 to the all-contributors hall (#4276) * Fix for Issue [#4156](https://github.com/aws/aws-sdk-java-v2/issues/4156) : Single quotes in toJson conversions for EnhancedDocuments are no longer being escaped. (#4277) * Amazon Connect Service Update: This release adds APIs to provision agents that are global / available in multiple AWS regions and distribute them across these regions by percentage. * AWS Secrets Manager Update: Add additional InvalidRequestException to list of possible exceptions for ListSecret. * AWS CloudTrail Update: Documentation updates for CloudTrail. 
* AWS Transfer Family Update: Documentation updates for AWS Transfer Family * Elastic Load Balancing Update: This release enables configuring security groups for Network Load Balancers * Amazon Omics Update: This release adds instanceType to GetRunTask & ListRunTasks responses. * Updated endpoints.json and partitions.json. * Release 2.20.124. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.125-SNAPSHOT * AmplifyBackend Update: Adds sensitive trait to required input shapes. * AWS Config Update: Updated ResourceType enum with new resource types onboarded by AWS Config in July 2023. * Amazon Elastic Compute Cloud Update: Amazon EC2 P5 instances, powered by the latest NVIDIA H100 Tensor Core GPUs, deliver the highest performance in EC2 for deep learning (DL) and HPC applications. M7i-flex and M7i instances are next-generation general purpose instances powered by custom 4th Generation Intel Xeon Scalable processors. * Amazon Simple Email Service Update: Doc only updates to include: 1) Clarified which part of an email address where it's okay to have Punycode when it contains non-ASCII characters for the SendRawEmail action and other actions where this is applicable. 2) Updated S3Action description with new MB max bucket size from 30 to 40. * Amazon Simple Workflow Service Update: This release adds new API parameters to override workflow task list for workflow executions. * Amazon QuickSight Update: New Authentication method for Account subscription - IAM Identity Center. Hierarchy layout support, default column width support and related style properties for pivot table visuals. Non-additive topic field aggregations for Topic API * Updated endpoints.json and partitions.json. * Release 2.20.125. Updated CHANGELOG.md, README.md and all pom.xml.
* Update to next snapshot version: 2.20.126-SNAPSHOT --------- Co-authored-by: AWS <> Co-authored-by: aws-sdk-java-automation <43143862+aws-sdk-java-automation@users.noreply.github.com> Co-authored-by: Matthew Miller Co-authored-by: Dongie Agnir <261310+dagnir@users.noreply.github.com> Co-authored-by: Zoe Wang <33073555+zoewangg@users.noreply.github.com> Co-authored-by: Olivier L Applin Co-authored-by: Debora N. Ito <476307+debora-ito@users.noreply.github.com> Co-authored-by: John Viegas <70235430+joviegas@users.noreply.github.com> --- .all-contributorsrc | 18 + .changes/2.20.109.json | 42 + .changes/2.20.110.json | 54 + .changes/2.20.111.json | 90 + .changes/2.20.112.json | 72 + .changes/2.20.113.json | 36 + .changes/2.20.114.json | 12 + .changes/2.20.115.json | 48 + .changes/2.20.116.json | 78 + .changes/2.20.117.json | 54 + .changes/2.20.118.json | 42 + .changes/2.20.119.json | 42 + .changes/2.20.120.json | 42 + .changes/2.20.121.json | 42 + .changes/2.20.122.json | 30 + .changes/2.20.123.json | 48 + .changes/2.20.124.json | 60 + .changes/2.20.125.json | 48 + .../bugfix-S3TransferManager-d9c09d4.json | 6 - CHANGELOG.md | 510 ++++- README.md | 14 +- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 12 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 12 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- .../customization/CustomizationConfig.java | 13 + .../customization/MultipartCustomization.java | 64 + .../amazon/awssdk/codegen/poet/ClassSpec.java | 2 +- .../poet/builder/AsyncClientBuilderClass.java | 64 +- .../builder/AsyncClientBuilderInterface.java | 77 +- .../codegen/rules/partitions.json.resource | 5 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- 
core/aws-core/pom.xml | 2 +- .../awssdk/awscore/AwsResponseMetadata.java | 3 +- .../AwsServiceClientConfiguration.java | 20 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- .../regions/internal/region/endpoints.json | 417 +++- core/sdk-core/pom.xml | 2 +- .../core/SdkServiceClientConfiguration.java | 24 +- .../awssdk/core/async/AsyncRequestBody.java | 39 + .../AsyncRequestBodySplitConfiguration.java | 141 ++ .../ChecksumCalculatingAsyncRequestBody.java | 2 +- .../core/internal/async/ChunkBuffer.java | 141 +- .../internal/async/SplittingPublisher.java | 331 +++ .../AsyncRequestBodyConfigurationTest.java | 58 + .../core/async/AsyncRequestBodyTest.java | 2 + .../awssdk/core/async/ChunkBufferTest.java | 68 +- ...ecksumCalculatingAsyncRequestBodyTest.java | 105 +- .../async/SplittingPublisherTest.java | 279 +++ http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- .../crt/AwsCrtHttpClientWireMockTest.java | 7 - http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- .../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 6 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- .../document/JsonStringFormatHelper.java | 3 - .../document/EnhancedDocumentTest.java | 27 +- services-custom/iam-policy-builder/pom.xml | 2 +- .../awssdk/policybuilder/iam/IamPolicy.java | 4 +- .../policybuilder/iam/IamPolicyReader.java | 2 +- 
.../policybuilder/iam/IamPolicyWriter.java | 2 +- .../iam/internal/DefaultIamPolicyWriter.java | 2 +- .../iam/IamPolicyReaderTest.java | 87 +- .../iam/IamPolicyWriterTest.java | 72 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- .../codegen-resources/service-2.json | 4 +- services/alexaforbusiness/pom.xml | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 340 +-- .../codegen-resources/endpoint-tests.json | 763 ++----- .../codegen-resources/service-2.json | 4 + services/amplifyuibuilder/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 74 +- services/apigateway/pom.xml | 2 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 399 ++-- .../codegen-resources/endpoint-tests.json | 1591 ++----------- .../codegen-resources/service-2.json | 14 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- services/applicationinsights/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 399 ++-- .../codegen-resources/endpoint-tests.json | 1187 ++-------- .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 501 ++++- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml 
| 2 +- .../codegen-resources/endpoint-rule-set.json | 362 ++- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 35 +- services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 199 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- .../codegen-resources/service-2.json | 18 +- services/billingconductor/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 237 +- .../codegen-resources/endpoint-tests.json | 126 +- .../codegen-resources/service-2.json | 236 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 655 +----- .../codegen-resources/endpoint-tests.json | 313 ++- .../codegen-resources/service-2.json | 8 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- .../codegen-resources/service-2.json | 218 +- services/chimesdkmeetings/pom.xml | 2 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 86 +- .../codegen-resources/service-2.json | 23 + services/cleanrooms/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 86 +- .../codegen-resources/paginators-1.json | 12 + .../codegen-resources/service-2.json | 983 +++++++- services/cloud9/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 392 ++-- .../codegen-resources/endpoint-tests.json | 869 ++----- .../codegen-resources/service-2.json | 2 +- services/cloudcontrol/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 399 ++-- .../codegen-resources/endpoint-tests.json | 1319 ++--------- .../codegen-resources/service-2.json | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 ++- .../codegen-resources/service-2.json | 
198 +- services/cloudfront/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 538 +---- .../codegen-resources/endpoint-tests.json | 134 +- .../codegen-resources/service-2.json | 26 +- services/cloudhsm/pom.xml | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 392 ++-- .../codegen-resources/service-2.json | 10 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- services/codedeploy/pom.xml | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 399 ++-- .../codegen-resources/endpoint-tests.json | 714 ++---- .../codegen-resources/service-2.json | 26 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- .../codegen-resources/service-2.json | 465 ++-- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- services/config/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 ++- .../codegen-resources/service-2.json | 37 +- services/connect/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 ++- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 312 ++- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- 
services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- services/costexplorer/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 914 +++----- .../codegen-resources/endpoint-tests.json | 315 ++- .../codegen-resources/service-2.json | 214 +- services/customerprofiles/pom.xml | 2 +- .../codegen-resources/service-2.json | 331 ++- services/databasemigration/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 438 ++-- .../codegen-resources/paginators-1.json | 50 + .../codegen-resources/service-2.json | 2002 ++++++++++++++++- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 291 ++- services/dax/pom.xml | 2 +- services/detective/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 2 +- services/devicefarm/pom.xml | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- .../codegen-resources/service-2.json | 3 +- services/dynamodb/pom.xml | 2 +- .../codegen-resources/dynamodb/service-2.json | 6 +- services/ebs/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 399 ++-- .../codegen-resources/endpoint-tests.json | 630 +++++- .../codegen-resources/service-2.json | 37 +- services/ec2/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 ++- .../codegen-resources/service-2.json | 127 +- .../codegen-resources/waiters-2.json | 25 + services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 42 
+- services/efs/pom.xml | 2 +- services/eks/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 442 ++-- .../codegen-resources/endpoint-tests.json | 1452 ++---------- .../codegen-resources/service-2.json | 16 +- services/elasticache/pom.xml | 2 +- .../codegen-resources/service-2.json | 44 +- services/elasticbeanstalk/pom.xml | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 398 ++-- .../codegen-resources/endpoint-tests.json | 413 ++-- .../codegen-resources/service-2.json | 38 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- .../codegen-resources/service-2.json | 66 +- services/entityresolution/pom.xml | 60 + .../codegen-resources/endpoint-rule-set.json | 350 +++ .../codegen-resources/endpoint-tests.json | 295 +++ .../codegen-resources/paginators-1.json | 22 + .../codegen-resources/service-2.json | 1543 +++++++++++++ services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 139 +- services/gamelift/pom.xml | 2 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 349 +-- .../codegen-resources/endpoint-tests.json | 279 ++- .../codegen-resources/service-2.json | 12 +- services/glue/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 
249 +- services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 ++- .../codegen-resources/service-2.json | 7 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- .../codegen-resources/service-2.json | 144 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 177 +- services/internetmonitor/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 244 +- .../codegen-resources/service-2.json | 89 +- services/iot/pom.xml | 2 +- services/iot1clickdevices/pom.xml | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 123 +- .../codegen-resources/service-2.json | 10 +- services/kafka/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 303 +++ services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- 
services/kinesisanalytics/pom.xml | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 8 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 399 ++-- .../codegen-resources/endpoint-tests.json | 1144 ++-------- .../codegen-resources/service-2.json | 29 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- .../codegen-resources/service-2.json | 3 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 340 +-- .../codegen-resources/endpoint-tests.json | 243 +- .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 737 +++++- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 60 + .../codegen-resources/endpoint-rule-set.json | 350 +++ .../codegen-resources/endpoint-tests.json | 314 +++ .../codegen-resources/paginators-1.json | 22 + .../codegen-resources/service-2.json | 1018 +++++++++ .../codegen-resources/waiters-2.json | 5 + services/marketplacecatalog/pom.xml | 2 +- services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- 
services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- .../codegen-resources/service-2.json | 1342 +++++------ services/medialive/pom.xml | 2 +- .../codegen-resources/service-2.json | 38 +- services/mediapackage/pom.xml | 2 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- services/mediastoredata/pom.xml | 2 +- services/mediatailor/pom.xml | 2 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- services/mq/pom.xml | 2 +- services/mturk/pom.xml | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 161 +- .../codegen-resources/service-2.json | 37 +- services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- .../codegen-resources/service-2.json | 7 +- services/opsworks/pom.xml | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- services/pinpoint/pom.xml | 2 +- .../codegen-resources/service-2.json | 436 ++-- services/pinpointemail/pom.xml | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- 
services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- .../codegen-resources/service-2.json | 8 +- services/pom.xml | 4 +- services/pricing/pom.xml | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/paginators-1.json | 42 + .../codegen-resources/service-2.json | 760 ++++++- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 ++- .../codegen-resources/service-2.json | 371 ++- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 26 +- services/resiliencehub/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 1031 ++++++--- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 1593 ++++--------- .../codegen-resources/endpoint-tests.json | 422 +++- .../codegen-resources/service-2.json | 17 +- services/route53domains/pom.xml | 2 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 2 +- .../services/s3/S3IntegrationTestBase.java | 2 +- .../crt/S3CrossRegionCrtIntegrationTest.java | 2 +- .../S3CrtClientPutObjectIntegrationTest.java | 2 +- ...S3ClientMultiPartCopyIntegrationTest.java} | 77 +- 
...ltipartClientPutObjectIntegrationTest.java | 136 ++ .../services/s3/S3CrtAsyncClientBuilder.java | 20 + .../client/S3AsyncClientDecorator.java | 24 +- .../crt/CopyRequestConversionUtils.java | 168 -- .../internal/crt/DefaultS3CrtAsyncClient.java | 17 +- .../s3/internal/crt/S3CrtAsyncHttpClient.java | 1 + .../crt/S3NativeClientConfiguration.java | 13 + .../crt/UploadPartCopyRequestIterable.java | 9 +- ....java => StreamingRequestInterceptor.java} | 5 +- .../{crt => multipart}/CopyObjectHelper.java | 117 +- .../multipart/GenericMultipartHelper.java | 140 ++ .../multipart/MultipartS3AsyncClient.java | 108 + .../multipart/MultipartUploadHelper.java | 147 ++ .../multipart/SdkPojoConversionUtils.java | 195 ++ .../multipart/UploadObjectHelper.java | 73 + .../UploadWithKnownContentLengthHelper.java | 256 +++ .../UploadWithUnknownContentLengthHelper.java | 249 ++ .../s3/multipart/MultipartConfiguration.java | 199 ++ .../codegen-resources/customization.config | 9 +- .../s3/internal/crt/CopyObjectHelperTest.java | 36 +- .../crt/S3CrtAsyncHttpClientTest.java | 22 +- ...a => StreamingRequestInterceptorTest.java} | 15 +- .../s3/internal/multipart/MpuTestUtils.java | 65 + .../MultipartClientUserAgentTest.java | 82 + .../S3MultipartClientBuilderTest.java | 63 + .../SdkPojoConversionUtilsTest.java} | 119 +- .../multipart/UploadObjectHelperTest.java | 412 ++++ services/s3control/pom.xml | 2 +- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 400 ++-- .../codegen-resources/service-2.json | 316 ++- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 398 ++-- .../codegen-resources/service-2.json 
| 25 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- .../codegen-resources/service-2.json | 1 + services/securityhub/pom.xml | 2 +- .../codegen-resources/service-2.json | 110 +- services/securitylake/pom.xml | 2 +- .../serverlessapplicationrepository/pom.xml | 2 +- services/servicecatalog/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 30 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- services/ses/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 399 ++-- .../codegen-resources/endpoint-tests.json | 1081 ++------- .../codegen-resources/service-2.json | 522 ++--- services/sesv2/pom.xml | 2 +- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- .../codegen-resources/service-2.json | 10 +- services/ssm/pom.xml | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 68 +- .../codegen-resources/service-2.json | 30 +- services/support/pom.xml | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 ++- .../codegen-resources/service-2.json | 22 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- 
.../codegen-resources/endpoint-rule-set.json | 344 ++- .../codegen-resources/service-2.json | 146 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 148 +- .../codegen-resources/service-2.json | 32 + services/workdocs/pom.xml | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- .../codegen-resources/service-2.json | 4 +- services/workspacesweb/pom.xml | 2 +- services/xray/pom.xml | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/pom.xml | 2 +- test/sdk-benchmarks/pom.xml | 2 +- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- .../amazon/awssdk/utils/BinaryUtils.java | 4 +- .../awssdk/utils/async/SimplePublisher.java | 2 +- .../amazon/awssdk/utils/BinaryUtilsTest.java | 11 + 659 files changed, 34059 insertions(+), 22647 deletions(-) create mode 100644 .changes/2.20.109.json create mode 100644 .changes/2.20.110.json create mode 100644 .changes/2.20.111.json create mode 100644 .changes/2.20.112.json create mode 100644 .changes/2.20.113.json create mode 100644 .changes/2.20.114.json create mode 100644 .changes/2.20.115.json create mode 100644 .changes/2.20.116.json 
create mode 100644 .changes/2.20.117.json create mode 100644 .changes/2.20.118.json create mode 100644 .changes/2.20.119.json create mode 100644 .changes/2.20.120.json create mode 100644 .changes/2.20.121.json create mode 100644 .changes/2.20.122.json create mode 100644 .changes/2.20.123.json create mode 100644 .changes/2.20.124.json create mode 100644 .changes/2.20.125.json delete mode 100644 .changes/next-release/bugfix-S3TransferManager-d9c09d4.json create mode 100644 codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/MultipartCustomization.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyConfigurationTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java create mode 100644 services/entityresolution/pom.xml create mode 100644 services/entityresolution/src/main/resources/codegen-resources/endpoint-rule-set.json create mode 100644 services/entityresolution/src/main/resources/codegen-resources/endpoint-tests.json create mode 100644 services/entityresolution/src/main/resources/codegen-resources/paginators-1.json create mode 100644 services/entityresolution/src/main/resources/codegen-resources/service-2.json create mode 100644 services/managedblockchainquery/pom.xml create mode 100644 services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-rule-set.json create mode 100644 services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-tests.json create mode 100644 services/managedblockchainquery/src/main/resources/codegen-resources/paginators-1.json create mode 100644 
services/managedblockchainquery/src/main/resources/codegen-resources/service-2.json create mode 100644 services/managedblockchainquery/src/main/resources/codegen-resources/waiters-2.json rename services/s3/src/it/java/software/amazon/awssdk/services/s3/{crt/S3CrtClientCopyIntegrationTest.java => multipart/S3ClientMultiPartCopyIntegrationTest.java} (70%) create mode 100644 services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3MultipartClientPutObjectIntegrationTest.java delete mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyRequestConversionUtils.java rename services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/{PutObjectInterceptor.java => StreamingRequestInterceptor.java} (85%) rename services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/{crt => multipart}/CopyObjectHelper.java (71%) create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/GenericMultipartHelper.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartUploadHelper.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtils.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithUnknownContentLengthHelper.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java rename 
services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/{PutObjectInterceptorTest.java => StreamingRequestInterceptorTest.java} (74%) create mode 100644 services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuTestUtils.java create mode 100644 services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartClientUserAgentTest.java create mode 100644 services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/S3MultipartClientBuilderTest.java rename services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/{crt/CopyRequestConversionUtilsTest.java => multipart/SdkPojoConversionUtilsTest.java} (61%) create mode 100644 services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java diff --git a/.all-contributorsrc b/.all-contributorsrc index 9aad5c2d7b7..dac08ca7d53 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -823,6 +823,24 @@ "contributions": [ "code" ] + }, + { + "login": "L-Applin", + "name": "Olivier L Applin", + "avatar_url": "https://avatars.githubusercontent.com/u/16511950?v=4", + "profile": "http://applin.ca", + "contributions": [ + "code" + ] + }, + { + "login": "breader124", + "name": "Adrian Chlebosz", + "avatar_url": "https://avatars.githubusercontent.com/u/36669019?v=4", + "profile": "https://github.com/breader124", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, diff --git a/.changes/2.20.109.json b/.changes/2.20.109.json new file mode 100644 index 00000000000..643bd370bd3 --- /dev/null +++ b/.changes/2.20.109.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.109", + "date": "2023-07-21", + "entries": [ + { + "type": "bugfix", + "category": "S3 Transfer Manager", + "contributor": "", + "description": "Fix a bug where the SSE-C parameters are not copied to the CompleteMultipartUpload request when transforming to a multipart copy." 
+ }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes improvements to Preserve 444 handling, compatibility of HEVC sources without frame rates, and general improvements to MP4 outputs." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "This release adds support for AWS Glue Crawler with Apache Hudi Tables, allowing Crawlers to discover Hudi Tables in S3 and register them in Glue Data Catalog for query engines to query against." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adds support for the DBSystemID parameter of CreateDBInstance to RDS Custom for Oracle." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "contributor": "", + "description": "Fixed VolumeEncryptionKey descriptions" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.110.json b/.changes/2.20.110.json new file mode 100644 index 00000000000..5861938b340 --- /dev/null +++ b/.changes/2.20.110.json @@ -0,0 +1,54 @@ +{ + "version": "2.20.110", + "date": "2023-07-24", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "This release supports filtering by DRIFT_STATUS for existing API ListStackInstances and adds support for a new API ListStackInstanceResourceDrifts. Customers can now view resource drift information from their StackSet management accounts." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "This release introduces the new API 'GetSavingsPlanPurchaseRecommendationDetails', which retrieves the details for a Savings Plan recommendation. 
It also updates the existing API 'GetSavingsPlansPurchaseRecommendation' to include the recommendation detail ID." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Added support for Data Preparation Recipe node in Glue Studio jobs" + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "contributor": "", + "description": "Documentation updates for Amazon API Gateway." + }, + { + "type": "feature", + "category": "Amazon Chime SDK Media Pipelines", + "contributor": "", + "description": "AWS Media Pipeline compositing enhancement and Media Insights Pipeline auto language identification." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Add \"disabled\" enum value to SpotInstanceState." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "This release launches new Snapshot APIs for CSV and PDF exports, adds support for info icon for filters and parameters in Exploration APIs, adds modeled exception to the DeleteAccountCustomization API, and introduces AttributeAggregationFunction's ability to add UNIQUE_VALUE aggregation in tooltips." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.111.json b/.changes/2.20.111.json new file mode 100644 index 00000000000..f6337e44b74 --- /dev/null +++ b/.changes/2.20.111.json @@ -0,0 +1,90 @@ +{ + "version": "2.20.111", + "date": "2023-07-25", + "entries": [ + { + "type": "bugfix", + "category": "AWS IAM Policy Builder", + "contributor": "", + "description": "Fixed bug where actions were written instead of resources." 
+ }, + { + "type": "feature", + "category": "AWSBillingConductor", + "contributor": "", + "description": "Added support for Auto-Assocate Billing Groups for CreateBillingGroup, UpdateBillingGroup, and ListBillingGroups." + }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "AWS DataSync now supports Microsoft Azure Blob Storage locations." + }, + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Add Python 3.11 (python3.11) support to AWS Lambda" + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Add support for CONTAINS and NOT_CONTAINS comparison operators for Automation Rules string filters and map filters" + }, + { + "type": "feature", + "category": "AWS Security Token Service", + "contributor": "", + "description": "API updates for the AWS Security Token Service" + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "This release adds support for SFTP Connectors." + }, + { + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "Amazon Connect Customer Profiles now supports rule-based resolution to match and merge similar profiles into unified profiles, helping companies deliver faster and more personalized customer service by providing access to relevant customer information for agents and automated experiences." 
+ }, + { + "type": "feature", + "category": "Amazon Connect Wisdom Service", + "contributor": "", + "description": "This release added two new data types: AssistantIntegrationConfiguration, and SessionIntegrationConfiguration to support Wisdom integration with Amazon Connect Chat" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "Documentation updates for DynamoDB" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release adds support for monitoring storage optimization progress on the DescribeDBInstances API." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API" + }, + { + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "This release adds support for publishing application logs to CloudWatch." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.112.json b/.changes/2.20.112.json new file mode 100644 index 00000000000..b324d8bea52 --- /dev/null +++ b/.changes/2.20.112.json @@ -0,0 +1,72 @@ +{ + "version": "2.20.112", + "date": "2023-07-26", + "entries": [ + { + "type": "feature", + "category": "AWS Cloud Control API", + "contributor": "", + "description": "Updates the documentation for CreateResource." + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes general updates to user documentation." 
+ }, + { + "type": "feature", + "category": "AWS EntityResolution", + "contributor": "", + "description": "AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Release Glue Studio Snowflake Connector Node for SDK/CLI" + }, + { + "type": "feature", + "category": "Amazon HealthLake", + "contributor": "", + "description": "Updating the HealthLake service documentation." + }, + { + "type": "feature", + "category": "Amazon Managed Blockchain Query", + "contributor": "", + "description": "Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs." + }, + { + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "The service is renaming as a part of AWS Health." + }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds 1 new voice - Lisa (nl-BE)" + }, + { + "type": "feature", + "category": "Amazon Route 53", + "contributor": "", + "description": "Update that corrects the documents for received feedback." + }, + { + "type": "feature", + "category": "OpenSearch Service Serverless", + "contributor": "", + "description": "This release adds new collection type VectorSearch." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.113.json b/.changes/2.20.113.json new file mode 100644 index 00000000000..fc5fa7c5ce3 --- /dev/null +++ b/.changes/2.20.113.json @@ -0,0 +1,36 @@ +{ + "version": "2.20.113", + "date": "2023-07-27", + "entries": [ + { + "type": "feature", + "category": "Amazon Elastic Block Store", + "contributor": "", + "description": "SDK and documentation updates for Amazon Elastic Block Store API" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "SDK and documentation updates for Amazon Elastic Block Store APIs" + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "contributor": "", + "description": "Add multiple customer error code to handle customer caused failure when managing EKS node groups" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Expose ProfilerConfig attribute in SageMaker Search API response." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "This release updates validation for instance types used in the AllowedInstanceTypes and ExcludedInstanceTypes parameters of the InstanceRequirements property of a MixedInstancesPolicy." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.114.json b/.changes/2.20.114.json new file mode 100644 index 00000000000..d03a88e1077 --- /dev/null +++ b/.changes/2.20.114.json @@ -0,0 +1,12 @@ +{ + "version": "2.20.114", + "date": "2023-07-27", + "entries": [ + { + "type": "feature", + "category": "Amazon Simple Queue Service", + "contributor": "", + "description": "Documentation changes related to SQS APIs." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.115.json b/.changes/2.20.115.json new file mode 100644 index 00000000000..5b4f41b6d87 --- /dev/null +++ b/.changes/2.20.115.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.115", + "date": "2023-07-28", + "entries": [ + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet." + }, + { + "type": "feature", + "category": "Amazon CloudFront", + "contributor": "", + "description": "Add a new JavaScript runtime version for CloudFront Functions." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Application Insights", + "contributor": "", + "description": "This release enable customer to add/remove/update more than one workload for a component" + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds support for new number types." + }, + { + "type": "feature", + "category": "Amazon Pinpoint", + "contributor": "", + "description": "Added support for sending push notifications using the FCM v1 API with json credentials. Amazon Pinpoint customers can now deliver messages to Android devices using both FCM v1 API and the legacy FCM/GCM API" + }, + { + "type": "feature", + "category": "Managed Streaming for Kafka", + "contributor": "", + "description": "Amazon MSK has introduced new versions of ListClusterOperations and DescribeClusterOperation APIs. These v2 APIs provide information and insights into the ongoing operations of both MSK Provisioned and MSK Serverless clusters." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.116.json b/.changes/2.20.116.json new file mode 100644 index 00000000000..4f5480b6a4f --- /dev/null +++ b/.changes/2.20.116.json @@ -0,0 +1,78 @@ +{ + "version": "2.20.116", + "date": "2023-07-31", + "entries": [ + { + "type": "feature", + "category": "AWS Amplify UI Builder", + "contributor": "", + "description": "Amplify Studio releases GraphQL support for codegen job action." + }, + { + "type": "feature", + "category": "AWS Clean Rooms Service", + "contributor": "", + "description": "This release introduces custom SQL queries - an expanded set of SQL you can run. This release adds analysis templates, a new resource for storing pre-defined custom SQL queries ahead of time. This release also adds the Custom analysis rule, which lets you approve analysis templates for querying." + }, + { + "type": "feature", + "category": "AWS CodeStar connections", + "contributor": "", + "description": "New integration with the Gitlab provider type." + }, + { + "type": "feature", + "category": "Amazon EventBridge Scheduler", + "contributor": "", + "description": "This release introduces automatic deletion of schedules in EventBridge Scheduler. If configured, EventBridge Scheduler automatically deletes a schedule after the schedule has completed its last invocation." + }, + { + "type": "feature", + "category": "Amazon Lookout for Equipment", + "contributor": "", + "description": "This release includes new import resource, model versioning and resource policy features." + }, + { + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "Add CreationType filter for ListReadSets" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release adds support for Aurora MySQL local write forwarding, which allows for forwarding of write operations from reader DB instances to the writer DB instance." 
+ }, + { + "type": "feature", + "category": "Amazon Route 53", + "contributor": "", + "description": "Amazon Route 53 now supports the Israel (Tel Aviv) Region (il-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "You can now configure an instance refresh to set its status to 'failed' when it detects that a specified CloudWatch alarm has gone into the ALARM state. You can also choose to roll back the instance refresh automatically when the alarm threshold is met." + }, + { + "type": "feature", + "category": "Elastic Disaster Recovery Service", + "contributor": "", + "description": "Add support for in-aws right sizing" + }, + { + "type": "feature", + "category": "Inspector2", + "contributor": "", + "description": "This release adds 1 new API: BatchGetFindingDetails to retrieve enhanced vulnerability intelligence details for findings." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.117.json b/.changes/2.20.117.json new file mode 100644 index 00000000000..6875a98fc8c --- /dev/null +++ b/.changes/2.20.117.json @@ -0,0 +1,54 @@ +{ + "version": "2.20.117", + "date": "2023-08-01", + "entries": [ + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "This release adds support for price capacity optimized allocation strategy for Spot Instances." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version." 
+ }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Internet Monitor", + "contributor": "", + "description": "This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event." + }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Added support for deleted clusters PiTR." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Add Stairs TrafficPattern and FlatInvocations to RecommendationJobStoppingConditions" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.118.json b/.changes/2.20.118.json new file mode 100644 index 00000000000..f9c9494a13d --- /dev/null +++ b/.changes/2.20.118.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.118", + "date": "2023-08-02", + "entries": [ + { + "type": "feature", + "category": "AWS Budgets", + "contributor": "", + "description": "As part of CAE tagging integration we need to update our budget names regex filter to prevent customers from using \"/action/\" in their budget names." 
+ }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "This release includes additional Glue Streaming KAFKA SASL property types." + }, + { + "type": "feature", + "category": "AWS Resilience Hub", + "contributor": "", + "description": "Drift Detection capability added when applications policy has moved from a meet to breach state. Customers will be able to exclude operational recommendations and receive credit in their resilience score. Customers can now add ARH permissions to an existing or new role." + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "contributor": "", + "description": "New feature that logs Cognito user pool error messages to CloudWatch logs." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "SageMaker Inference Recommender introduces a new API GetScalingConfigurationRecommendation to recommend auto scaling policies based on completed Inference Recommender jobs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.119.json b/.changes/2.20.119.json new file mode 100644 index 00000000000..d737645c704 --- /dev/null +++ b/.changes/2.20.119.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.119", + "date": "2023-08-03", + "entries": [ + { + "type": "bugfix", + "category": "Amazon S3", + "contributor": "", + "description": "Add `Expect: 100-continue` header for `UploadPartRequest` so that an upload part request can fail faster if there is a server error." + }, + { + "type": "feature", + "category": "AWS Cloud9", + "contributor": "", + "description": "Updated the deprecation date for Amazon Linux. Doc only update."
+ }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "The release makes public API for DMS Schema Conversion feature." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds new parameter isPrimaryIPv6 to allow assigning an IPv6 address as a primary IPv6 address to a network interface which cannot be changed to give equivalent functionality available for network interfaces with primary IPv4 address." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Amazon SageMaker now supports running training jobs on p5.48xlarge instance types." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Documentation changes related to Amazon EC2 Auto Scaling APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.120.json b/.changes/2.20.120.json new file mode 100644 index 00000000000..f8f88ca6099 --- /dev/null +++ b/.changes/2.20.120.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.120", + "date": "2023-08-04", + "entries": [ + { + "type": "feature", + "category": "AWS Certificate Manager Private Certificate Authority", + "contributor": "", + "description": "Documentation correction for AWS Private CA" + }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "Display cloud storage used capacity at a cluster level." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This is a documentation update to address various tickets." 
+ }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.121.json b/.changes/2.20.121.json new file mode 100644 index 00000000000..9750c52dfb2 --- /dev/null +++ b/.changes/2.20.121.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.121", + "date": "2023-08-07", + "entries": [ + { + "type": "feature", + "category": "Amazon Detective", + "contributor": "", + "description": "Updated the email validation regex to be in line with the TLD name specifications." + }, + { + "type": "feature", + "category": "Amazon Interactive Video Service RealTime", + "contributor": "", + "description": "Add QUOTA_EXCEEDED and PUBLISHER_NOT_FOUND to EventErrorCode for stage health events." + }, + { + "type": "feature", + "category": "Amazon Kinesis Video Streams", + "contributor": "", + "description": "This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature." + }, + { + "type": "feature", + "category": "Amazon Kinesis Video Streams Archived Media", + "contributor": "", + "description": "This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature." + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "contributor": "", + "description": "This release adds code snippets for Amazon Rekognition Custom Labels." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.122.json b/.changes/2.20.122.json new file mode 100644 index 00000000000..8a3dd6ab9d1 --- /dev/null +++ b/.changes/2.20.122.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.122", + "date": "2023-08-08", + "entries": [ + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "This release introduces a new logically air-gapped vault (Preview) in AWS Backup that stores immutable backup copies, which are locked by default and isolated with encryption using AWS owned keys. Logically air-gapped vault (Preview) allows secure recovery of application data across accounts." + }, + { + "type": "feature", + "category": "AWS Service Catalog", + "contributor": "", + "description": "Introduce support for HashiCorp Terraform Cloud in Service Catalog by adding TERRAFORM_CLOUD product type in CreateProduct and CreateProvisioningArtifact API." + }, + { + "type": "feature", + "category": "Amazon ElastiCache", + "contributor": "", + "description": "Added support for cluster mode in online migration and test migration API" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata."
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.123.json b/.changes/2.20.123.json new file mode 100644 index 00000000000..48a9bf7369b --- /dev/null +++ b/.changes/2.20.123.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.123", + "date": "2023-08-09", + "entries": [ + { + "type": "feature", + "category": "AWS Global Accelerator", + "contributor": "", + "description": "Documentation update for dualstack EC2 endpoint support" + }, + { + "type": "feature", + "category": "Amazon Chime SDK Voice", + "contributor": "", + "description": "Updating CreatePhoneNumberOrder, UpdatePhoneNumber and BatchUpdatePhoneNumbers APIs, adding phone number name" + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "For FSx for Lustre, add new data repository task type, RELEASE_DATA_FROM_FILESYSTEM, to release files that have been archived to S3. For FSx for Windows, enable support for configuring and updating SSD IOPS, and for updating storage type. For FSx for OpenZFS, add new deployment type, MULTI_AZ_1." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "contributor": "", + "description": "Added autoEnable ALL to UpdateOrganizationConfiguration and DescribeOrganizationConfiguration APIs." + }, + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "Allow users to configure upload threshold size for AWS CRT-based S3 client via `S3CrtAsyncClientBuilder#thresholdInBytes`." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release adds support for cross account access for SageMaker Model Cards through AWS RAM." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.124.json b/.changes/2.20.124.json new file mode 100644 index 00000000000..bb85d621d24 --- /dev/null +++ b/.changes/2.20.124.json @@ -0,0 +1,60 @@ +{ + "version": "2.20.124", + "date": "2023-08-10", + "entries": [ + { + "type": "bugfix", + "category": "AWS DynamoDB Enhanced Client", + "contributor": "", + "description": "Fix for Issue [#4156](https://github.com/aws/aws-sdk-java-v2/issues/4156) : Single quotes in toJson conversions for EnhancedDocuments are no longer being escaped." + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fixed bug where limit was not copied over when cloning ByteBuffer using immutableCopyOf()" + }, + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "Documentation updates for CloudTrail." + }, + { + "type": "feature", + "category": "AWS Secrets Manager", + "contributor": "", + "description": "Add additional InvalidRequestException to list of possible exceptions for ListSecret." + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "Documentation updates for AWS Transfer Family" + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds APIs to provision agents that are global / available in multiple AWS regions and distribute them across these regions by percentage." + }, + { + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "This release adds instanceType to GetRunTask & ListRunTasks responses." + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "This release enables configuring security groups for Network Load Balancers" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata."
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.125.json b/.changes/2.20.125.json new file mode 100644 index 00000000000..be4785cc08c --- /dev/null +++ b/.changes/2.20.125.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.125", + "date": "2023-08-11", + "entries": [ + { + "type": "feature", + "category": "AWS Config", + "contributor": "", + "description": "Updated ResourceType enum with new resource types onboarded by AWS Config in July 2023." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Amazon EC2 P5 instances, powered by the latest NVIDIA H100 Tensor Core GPUs, deliver the highest performance in EC2 for deep learning (DL) and HPC applications. M7i-flex and M7i instances are next-generation general purpose instances powered by custom 4th Generation Intel Xeon Scalable processors." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "New Authentication method for Account subscription - IAM Identity Center. Hierarchy layout support, default column width support and related style properties for pivot table visuals. Non-additive topic field aggregations for Topic API" + }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "contributor": "", + "description": "Doc only updates to include: 1) Clarified which part of an email address where it's okay to have Punycode when it contains non-ASCII characters for the SendRawEmail action and other actions where this is applicable. 2) Updated S3Action description with new MB max bucket size from 30 to 40." + }, + { + "type": "feature", + "category": "Amazon Simple Workflow Service", + "contributor": "", + "description": "This release adds new API parameters to override workflow task list for workflow executions." + }, + { + "type": "feature", + "category": "AmplifyBackend", + "contributor": "", + "description": "Adds sensitive trait to required input shapes." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/bugfix-S3TransferManager-d9c09d4.json b/.changes/next-release/bugfix-S3TransferManager-d9c09d4.json deleted file mode 100644 index 0b93e7f4040..00000000000 --- a/.changes/next-release/bugfix-S3TransferManager-d9c09d4.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "type": "bugfix", - "category": "S3 Transfer Manager", - "contributor": "", - "description": "Fix a bug where the SSE-C parameters are not copied to the CompleteMultipartUpload request when transforming to a multipart copy." -} diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f7570d660d..7e409994228 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,511 @@ +# __2.20.125__ __2023-08-11__ +## __AWS Config__ + - ### Features + - Updated ResourceType enum with new resource types onboarded by AWS Config in July 2023. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 P5 instances, powered by the latest NVIDIA H100 Tensor Core GPUs, deliver the highest performance in EC2 for deep learning (DL) and HPC applications. M7i-flex and M7i instances are next-generation general purpose instances powered by custom 4th Generation Intel Xeon Scalable processors. + +## __Amazon QuickSight__ + - ### Features + - New Authentication method for Account subscription - IAM Identity Center. Hierarchy layout support, default column width support and related style properties for pivot table visuals. 
Non-additive topic field aggregations for Topic API + +## __Amazon Simple Email Service__ + - ### Features + - Doc only updates to include: 1) Clarified which part of an email address where it's okay to have Punycode when it contains non-ASCII characters for the SendRawEmail action and other actions where this is applicable. 2) Updated S3Action description with new MB max bucket size from 30 to 40. + +## __Amazon Simple Workflow Service__ + - ### Features + - This release adds new API parameters to override workflow task list for workflow executions. + +## __AmplifyBackend__ + - ### Features + - Adds sensitive trait to required input shapes. + +# __2.20.124__ __2023-08-10__ +## __AWS CloudTrail__ + - ### Features + - Documentation updates for CloudTrail. + +## __AWS DynamoDB Enhanced Client__ + - ### Bugfixes + - Fix for Issue [#4156](https://github.com/aws/aws-sdk-java-v2/issues/4156) : Single quotes in toJson conversions for EnhancedDocuments are no longer being escaped. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Fixed bug where limit was not copied over when cloning ByteBuffer using immutableCopyOf() + +## __AWS Secrets Manager__ + - ### Features + - Add additional InvalidRequestException to list of possible exceptions for ListSecret. + +## __AWS Transfer Family__ + - ### Features + - Documentation updates for AWS Transfer Family + +## __Amazon Connect Service__ + - ### Features + - This release adds APIs to provision agents that are global / available in multiple AWS regions and distribute them across these regions by percentage. + +## __Amazon Omics__ + - ### Features + - This release adds instanceType to GetRunTask & ListRunTasks responses.
+ +## __Elastic Load Balancing__ + - ### Features + - This release enables configuring security groups for Network Load Balancers + +# __2.20.123__ __2023-08-09__ +## __AWS Global Accelerator__ + - ### Features + - Documentation update for dualstack EC2 endpoint support + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Chime SDK Voice__ + - ### Features + - Updating CreatePhoneNumberOrder, UpdatePhoneNumber and BatchUpdatePhoneNumbers APIs, adding phone number name + +## __Amazon FSx__ + - ### Features + - For FSx for Lustre, add new data repository task type, RELEASE_DATA_FROM_FILESYSTEM, to release files that have been archived to S3. For FSx for Windows, enable support for configuring and updating SSD IOPS, and for updating storage type. For FSx for OpenZFS, add new deployment type, MULTI_AZ_1. + +## __Amazon GuardDuty__ + - ### Features + - Added autoEnable ALL to UpdateOrganizationConfiguration and DescribeOrganizationConfiguration APIs. + +## __Amazon S3__ + - ### Features + - Allow users to configure upload threshold size for AWS CRT-based S3 client via `S3CrtAsyncClientBuilder#thresholdInBytes`. + +## __Amazon SageMaker Service__ + - ### Features + - This release adds support for cross account access for SageMaker Model Cards through AWS RAM. + +# __2.20.122__ __2023-08-08__ +## __AWS Backup__ + - ### Features + - This release introduces a new logically air-gapped vault (Preview) in AWS Backup that stores immutable backup copies, which are locked by default and isolated with encryption using AWS owned keys. Logically air-gapped vault (Preview) allows secure recovery of application data across accounts. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS Service Catalog__ + - ### Features + - Introduce support for HashiCorp Terraform Cloud in Service Catalog by adding TERRAFORM_CLOUD product type in CreateProduct and CreateProvisioningArtifact API.
+ +## __Amazon ElastiCache__ + - ### Features + - Added support for cluster mode in online migration and test migration API + +# __2.20.121__ __2023-08-07__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Detective__ + - ### Features + - Updated the email validation regex to be in line with the TLD name specifications. + +## __Amazon Interactive Video Service RealTime__ + - ### Features + - Add QUOTA_EXCEEDED and PUBLISHER_NOT_FOUND to EventErrorCode for stage health events. + +## __Amazon Kinesis Video Streams__ + - ### Features + - This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature. + +## __Amazon Kinesis Video Streams Archived Media__ + - ### Features + - This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature. + +## __Amazon Rekognition__ + - ### Features + - This release adds code snippets for Amazon Rekognition Custom Labels. + +# __2.20.120__ __2023-08-04__ +## __AWS Certificate Manager Private Certificate Authority__ + - ### Features + - Documentation correction for AWS Private CA + +## __AWS DataSync__ + - ### Features + - Display cloud storage used capacity at a cluster level. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Connect Service__ + - ### Features + - Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile. + +## __Amazon EC2 Container Service__ + - ### Features + - This is a documentation update to address various tickets. + +## __Amazon SageMaker Service__ + - ### Features + - Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object + +# __2.20.119__ __2023-08-03__ +## __AWS Cloud9__ + - ### Features + - Updated the deprecation date for Amazon Linux. Doc only update. 
+ +## __AWS Database Migration Service__ + - ### Features + - The release makes public API for DMS Schema Conversion feature. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds new parameter isPrimaryIPv6 to allow assigning an IPv6 address as a primary IPv6 address to a network interface which cannot be changed to give equivalent functionality available for network interfaces with primary IPv4 address. + +## __Amazon S3__ + - ### Bugfixes + - Add `Expect: 100-continue` header for `UploadPartRequest` so that an upload part request can fail faster if there is a server error. + +## __Amazon SageMaker Service__ + - ### Features + - Amazon SageMaker now supports running training jobs on p5.48xlarge instance types. + +## __Auto Scaling__ + - ### Features + - Documentation changes related to Amazon EC2 Auto Scaling APIs. + +# __2.20.118__ __2023-08-02__ +## __AWS Budgets__ + - ### Features + - As part of CAE tagging integration we need to update our budget names regex filter to prevent customers from using "/action/" in their budget names. + +## __AWS Glue__ + - ### Features + - This release includes additional Glue Streaming KAFKA SASL property types. + +## __AWS Resilience Hub__ + - ### Features + - Drift Detection capability added when applications policy has moved from a meet to breach state. Customers will be able to exclude operational recommendations and receive credit in their resilience score. Customers can now add ARH permissions to an existing or new role. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Cognito Identity Provider__ + - ### Features + - New feature that logs Cognito user pool error messages to CloudWatch logs. + +## __Amazon SageMaker Service__ + - ### Features + - SageMaker Inference Recommender introduces a new API GetScalingConfigurationRecommendation to recommend auto scaling policies based on completed Inference Recommender jobs.
+ +# __2.20.117__ __2023-08-01__ +## __AWS Batch__ + - ### Features + - This release adds support for price capacity optimized allocation strategy for Spot Instances. + +## __AWS Database Migration Service__ + - ### Features + - Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version. + +## __AWS Elemental MediaLive__ + - ### Features + - AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon CloudWatch Internet Monitor__ + - ### Features + - This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event. + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only. + +## __Amazon Relational Database Service__ + - ### Features + - Added support for deleted clusters PiTR. + +## __Amazon SageMaker Service__ + - ### Features + - Add Stairs TrafficPattern and FlatInvocations to RecommendationJobStoppingConditions + +# __2.20.116__ __2023-07-31__ +## __AWS Amplify UI Builder__ + - ### Features + - Amplify Studio releases GraphQL support for codegen job action. + +## __AWS Clean Rooms Service__ + - ### Features + - This release introduces custom SQL queries - an expanded set of SQL you can run. This release adds analysis templates, a new resource for storing pre-defined custom SQL queries ahead of time. This release also adds the Custom analysis rule, which lets you approve analysis templates for querying. + +## __AWS CodeStar connections__ + - ### Features + - New integration with the Gitlab provider type. 
+ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon EventBridge Scheduler__ + - ### Features + - This release introduces automatic deletion of schedules in EventBridge Scheduler. If configured, EventBridge Scheduler automatically deletes a schedule after the schedule has completed its last invocation. + +## __Amazon Lookout for Equipment__ + - ### Features + - This release includes new import resource, model versioning and resource policy features. + +## __Amazon Omics__ + - ### Features + - Add CreationType filter for ListReadSets + +## __Amazon Relational Database Service__ + - ### Features + - This release adds support for Aurora MySQL local write forwarding, which allows for forwarding of write operations from reader DB instances to the writer DB instance. + +## __Amazon Route 53__ + - ### Features + - Amazon Route 53 now supports the Israel (Tel Aviv) Region (il-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +## __Auto Scaling__ + - ### Features + - You can now configure an instance refresh to set its status to 'failed' when it detects that a specified CloudWatch alarm has gone into the ALARM state. You can also choose to roll back the instance refresh automatically when the alarm threshold is met. + +## __Elastic Disaster Recovery Service__ + - ### Features + - Add support for in-aws right sizing + +## __Inspector2__ + - ### Features + - This release adds 1 new API: BatchGetFindingDetails to retrieve enhanced vulnerability intelligence details for findings. + +# __2.20.115__ __2023-07-28__ +## __AWS CloudFormation__ + - ### Features + - This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __Amazon CloudFront__ + - ### Features + - Add a new JavaScript runtime version for CloudFront Functions. + +## __Amazon CloudWatch Application Insights__ + - ### Features + - This release enable customer to add/remove/update more than one workload for a component + +## __Amazon Connect Service__ + - ### Features + - This release adds support for new number types. + +## __Amazon Pinpoint__ + - ### Features + - Added support for sending push notifications using the FCM v1 API with json credentials. Amazon Pinpoint customers can now deliver messages to Android devices using both FCM v1 API and the legacy FCM/GCM API + +## __Managed Streaming for Kafka__ + - ### Features + - Amazon MSK has introduced new versions of ListClusterOperations and DescribeClusterOperation APIs. These v2 APIs provide information and insights into the ongoing operations of both MSK Provisioned and MSK Serverless clusters. + +# __2.20.114__ __2023-07-27__ +## __Amazon Simple Queue Service__ + - ### Features + - Documentation changes related to SQS APIs. + +# __2.20.113__ __2023-07-27__ +## __Amazon Elastic Block Store__ + - ### Features + - SDK and documentation updates for Amazon Elastic Block Store API + +## __Amazon Elastic Compute Cloud__ + - ### Features + - SDK and documentation updates for Amazon Elastic Block Store APIs + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Add multiple customer error code to handle customer caused failure when managing EKS node groups + +## __Amazon SageMaker Service__ + - ### Features + - Expose ProfilerConfig attribute in SageMaker Search API response. + +## __Auto Scaling__ + - ### Features + - This release updates validation for instance types used in the AllowedInstanceTypes and ExcludedInstanceTypes parameters of the InstanceRequirements property of a MixedInstancesPolicy. + +# __2.20.112__ __2023-07-26__ +## __AWS Cloud Control API__ + - ### Features + - Updates the documentation for CreateResource. 
+ +## __AWS Elemental MediaConvert__ + - ### Features + - This release includes general updates to user documentation. + +## __AWS EntityResolution__ + - ### Features + - AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information. + +## __AWS Glue__ + - ### Features + - Release Glue Studio Snowflake Connector Node for SDK/CLI + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon HealthLake__ + - ### Features + - Updating the HealthLake service documentation. + +## __Amazon Managed Blockchain Query__ + - ### Features + - Amazon Managed Blockchain (AMB) Query provides serverless access to standardized, multi-blockchain datasets with developer-friendly APIs. + +## __Amazon Omics__ + - ### Features + - The service is renaming as a part of AWS Health. + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds 1 new voice - Lisa (nl-BE) + +## __Amazon Route 53__ + - ### Features + - Update that corrects the documents for received feedback. + +## __OpenSearch Service Serverless__ + - ### Features + - This release adds new collection type VectorSearch. + +# __2.20.111__ __2023-07-25__ +## __AWS DataSync__ + - ### Features + - AWS DataSync now supports Microsoft Azure Blob Storage locations. + +## __AWS IAM Policy Builder__ + - ### Bugfixes + - Fixed bug where actions were written instead of resources. + +## __AWS Lambda__ + - ### Features + - Add Python 3.11 (python3.11) support to AWS Lambda + +## __AWS Security Token Service__ + - ### Features + - API updates for the AWS Security Token Service + +## __AWS SecurityHub__ + - ### Features + - Add support for CONTAINS and NOT_CONTAINS comparison operators for Automation Rules string filters and map filters + +## __AWS Transfer Family__ + - ### Features + - This release adds support for SFTP Connectors. 
+
+## __AWSBillingConductor__
+  - ### Features
+    - Added support for Auto-Associate Billing Groups for CreateBillingGroup, UpdateBillingGroup, and ListBillingGroups.
+
+## __Amazon Connect Customer Profiles__
+  - ### Features
+    - Amazon Connect Customer Profiles now supports rule-based resolution to match and merge similar profiles into unified profiles, helping companies deliver faster and more personalized customer service by providing access to relevant customer information for agents and automated experiences.
+
+## __Amazon Connect Wisdom Service__
+  - ### Features
+    - This release added two new data types: AssistantIntegrationConfiguration, and SessionIntegrationConfiguration to support Wisdom integration with Amazon Connect Chat
+
+## __Amazon DynamoDB__
+  - ### Features
+    - Documentation updates for DynamoDB
+
+## __Amazon Elastic Compute Cloud__
+  - ### Features
+    - This release adds an instance's peak and baseline network bandwidth as well as the memory sizes of an instance's inference accelerators to DescribeInstanceTypes.
+
+## __Amazon Relational Database Service__
+  - ### Features
+    - This release adds support for monitoring storage optimization progress on the DescribeDBInstances API.
+
+## __Amazon SageMaker Service__
+  - ### Features
+    - Mark ContentColumn and TargetLabelColumn as required Targets in TextClassificationJobConfig in CreateAutoMLJobV2API
+
+## __EMR Serverless__
+  - ### Features
+    - This release adds support for publishing application logs to CloudWatch.
+
+# __2.20.110__ __2023-07-24__
+## __AWS CloudFormation__
+  - ### Features
+    - This release supports filtering by DRIFT_STATUS for existing API ListStackInstances and adds support for a new API ListStackInstanceResourceDrifts. Customers can now view resource drift information from their StackSet management accounts.
+ +## __AWS Cost Explorer Service__ + - ### Features + - This release introduces the new API 'GetSavingsPlanPurchaseRecommendationDetails', which retrieves the details for a Savings Plan recommendation. It also updates the existing API 'GetSavingsPlansPurchaseRecommendation' to include the recommendation detail ID. + +## __AWS Glue__ + - ### Features + - Added support for Data Preparation Recipe node in Glue Studio jobs + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Chime SDK Media Pipelines__ + - ### Features + - AWS Media Pipeline compositing enhancement and Media Insights Pipeline auto language identification. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Add "disabled" enum value to SpotInstanceState. + +## __Amazon QuickSight__ + - ### Features + - This release launches new Snapshot APIs for CSV and PDF exports, adds support for info icon for filters and parameters in Exploration APIs, adds modeled exception to the DeleteAccountCustomization API, and introduces AttributeAggregationFunction's ability to add UNIQUE_VALUE aggregation in tooltips. + +## __AmazonApiGatewayV2__ + - ### Features + - Documentation updates for Amazon API Gateway. + +# __2.20.109__ __2023-07-21__ +## __AWS Elemental MediaConvert__ + - ### Features + - This release includes improvements to Preserve 444 handling, compatibility of HEVC sources without frame rates, and general improvements to MP4 outputs. + +## __AWS Glue__ + - ### Features + - This release adds support for AWS Glue Crawler with Apache Hudi Tables, allowing Crawlers to discover Hudi Tables in S3 and register them in Glue Data Catalog for query engines to query against. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Relational Database Service__ + - ### Features + - Adds support for the DBSystemID parameter of CreateDBInstance to RDS Custom for Oracle. 
+ +## __Amazon WorkSpaces__ + - ### Features + - Fixed VolumeEncryptionKey descriptions + +## __S3 Transfer Manager__ + - ### Bugfixes + - Fix a bug where the SSE-C parameters are not copied to the CompleteMultipartUpload request when transforming to a multipart copy. + # __2.20.108__ __2023-07-20__ ## __Amazon CodeCatalyst__ - ### Features @@ -676,7 +1184,7 @@ Special thanks to the following contributors to this release: ## __Contributors__ Special thanks to the following contributors to this release: -[@bmaizels](https://github.com/bmaizels), [@breader124](https://github.com/breader124) +[@breader124](https://github.com/breader124), [@bmaizels](https://github.com/bmaizels) # __2.20.85__ __2023-06-13__ ## __AWS CloudTrail__ - ### Features diff --git a/README.md b/README.md index 24404a45552..e5589f0ea26 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Gitter](https://badges.gitter.im/aws/aws-sdk-java-v2.svg)](https://gitter.im/aws/aws-sdk-java-v2?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![codecov](https://codecov.io/gh/aws/aws-sdk-java-v2/branch/master/graph/badge.svg)](https://codecov.io/gh/aws/aws-sdk-java-v2) -[![All Contributors](https://img.shields.io/badge/all_contributors-91-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-93-orange.svg?style=flat-square)](#contributors-) The **AWS SDK for Java 2.0** is a rewrite of 1.0 with some great new features. As with version 1.0, @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.20.108 + 2.20.125 pom import @@ -86,12 +86,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.20.108 + 2.20.125 software.amazon.awssdk s3 - 2.20.108 + 2.20.125 ``` @@ -103,7 +103,7 @@ You can import the whole SDK into your project (includes *ALL* services). 
Please software.amazon.awssdk aws-sdk-java - 2.20.108 + 2.20.125 ``` @@ -308,6 +308,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d David Negrete
David Negrete

💻 Stephen Flavin
Stephen Flavin

💻 + + Olivier L Applin
Olivier L Applin

💻 + Adrian Chlebosz
Adrian Chlebosz

💻 + diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index 1a89cd36769..3f1e05ce2f6 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index 61ca3de3621..2f26b4591b6 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index bbb03dbe96b..ba43c242541 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/archetypes/pom.xml b/archetypes/pom.xml index 6b93ec337bf..8948948ed7d 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index a914e7a39bb..9330b8937e7 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../pom.xml aws-sdk-java @@ -1778,6 +1778,16 @@ Amazon AutoScaling, etc). 
medicalimaging ${awsjavasdk.version} + + software.amazon.awssdk + entityresolution + ${awsjavasdk.version} + + + software.amazon.awssdk + managedblockchainquery + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index 4dd1c8fff02..4cc8aebcb6e 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/bom/pom.xml b/bom/pom.xml index cd5937747e2..a8f66acbe38 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../pom.xml bom @@ -1928,6 +1928,16 @@ medicalimaging ${awsjavasdk.version} + + software.amazon.awssdk + entityresolution + ${awsjavasdk.version} + + + software.amazon.awssdk + managedblockchainquery + ${awsjavasdk.version} + diff --git a/bundle/pom.xml b/bundle/pom.xml index afd9c2145c1..4f39da56f12 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT bundle jar diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 92a34c3997b..98612d5457c 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index b9892efbe71..e2bc275177f 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index f77dcf03098..7ac847882e3 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ 
software.amazon.awssdk
aws-sdk-java-pom
- 2.20.109-SNAPSHOT
+ 2.20.126-SNAPSHOT
../pom.xml
codegen-maven-plugin
diff --git a/codegen/pom.xml b/codegen/pom.xml
index 47e68f02e17..f0c2a0650b4 100644
--- a/codegen/pom.xml
+++ b/codegen/pom.xml
@@ -21,7 +21,7 @@
 software.amazon.awssdk
 aws-sdk-java-pom
- 2.20.109-SNAPSHOT
+ 2.20.126-SNAPSHOT
 codegen
 AWS Java SDK :: Code Generator
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java
index 596d44bcf14..0bef67df786 100644
--- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java
+++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java
@@ -227,6 +227,11 @@ public class CustomizationConfig {
      */
     private String asyncClientDecorator;

+    /**
+     * Only for s3. A set of customizations related to multipart operations.
+ */ + private MultipartCustomization multipartCustomization; + /** * Whether to skip generating endpoint tests from endpoint-tests.json */ @@ -665,4 +670,12 @@ public Map getCustomClientContextParams() { public void setCustomClientContextParams(Map customClientContextParams) { this.customClientContextParams = customClientContextParams; } + + public MultipartCustomization getMultipartCustomization() { + return this.multipartCustomization; + } + + public void setMultipartCustomization(MultipartCustomization multipartCustomization) { + this.multipartCustomization = multipartCustomization; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/MultipartCustomization.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/MultipartCustomization.java new file mode 100644 index 00000000000..94264a9e5ec --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/MultipartCustomization.java @@ -0,0 +1,64 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.config.customization; + +public class MultipartCustomization { + private String multipartConfigurationClass; + private String multipartConfigMethodDoc; + private String multipartEnableMethodDoc; + private String contextParamEnabledKey; + private String contextParamConfigKey; + + public String getMultipartConfigurationClass() { + return multipartConfigurationClass; + } + + public void setMultipartConfigurationClass(String multipartConfigurationClass) { + this.multipartConfigurationClass = multipartConfigurationClass; + } + + public String getMultipartConfigMethodDoc() { + return multipartConfigMethodDoc; + } + + public void setMultipartConfigMethodDoc(String multipartMethodDoc) { + this.multipartConfigMethodDoc = multipartMethodDoc; + } + + public String getMultipartEnableMethodDoc() { + return multipartEnableMethodDoc; + } + + public void setMultipartEnableMethodDoc(String multipartEnableMethodDoc) { + this.multipartEnableMethodDoc = multipartEnableMethodDoc; + } + + public String getContextParamEnabledKey() { + return contextParamEnabledKey; + } + + public void setContextParamEnabledKey(String contextParamEnabledKey) { + this.contextParamEnabledKey = contextParamEnabledKey; + } + + public String getContextParamConfigKey() { + return contextParamConfigKey; + } + + public void setContextParamConfigKey(String contextParamConfigKey) { + this.contextParamConfigKey = contextParamConfigKey; + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/ClassSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/ClassSpec.java index a8265f0dc7f..59a719fb2c7 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/ClassSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/ClassSpec.java @@ -20,7 +20,7 @@ import java.util.Collections; /** - * Represents the a Poet generated class + * Represents a Poet generated class */ public interface ClassSpec { diff --git 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java index 509a30c6c8d..3ff2b99ec98 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderClass.java @@ -17,6 +17,7 @@ import com.squareup.javapoet.ClassName; import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.ParameterSpec; import com.squareup.javapoet.ParameterizedTypeName; import com.squareup.javapoet.TypeSpec; import java.net.URI; @@ -24,6 +25,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.token.credentials.SdkTokenProvider; import software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.codegen.model.config.customization.MultipartCustomization; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.poet.ClassSpec; import software.amazon.awssdk.codegen.poet.PoetExtension; @@ -59,12 +61,12 @@ public AsyncClientBuilderClass(IntermediateModel model) { @Override public TypeSpec poetSpec() { TypeSpec.Builder builder = - PoetUtils.createClassBuilder(builderClassName) - .addAnnotation(SdkInternalApi.class) - .addModifiers(Modifier.FINAL) - .superclass(ParameterizedTypeName.get(builderBaseClassName, builderInterfaceName, clientInterfaceName)) - .addSuperinterface(builderInterfaceName) - .addJavadoc("Internal implementation of {@link $T}.", builderInterfaceName); + PoetUtils.createClassBuilder(builderClassName) + .addAnnotation(SdkInternalApi.class) + .addModifiers(Modifier.FINAL) + .superclass(ParameterizedTypeName.get(builderBaseClassName, builderInterfaceName, clientInterfaceName)) + .addSuperinterface(builderInterfaceName) + .addJavadoc("Internal implementation of {@link 
$T}.", builderInterfaceName); if (model.getEndpointOperation().isPresent()) { builder.addMethod(endpointDiscoveryEnabled()); @@ -80,6 +82,12 @@ public TypeSpec poetSpec() { builder.addMethod(bearerTokenProviderMethod()); } + MultipartCustomization multipartCustomization = model.getCustomizationConfig().getMultipartCustomization(); + if (multipartCustomization != null) { + builder.addMethod(multipartEnabledMethod(multipartCustomization)); + builder.addMethod(multipartConfigMethods(multipartCustomization)); + } + builder.addMethod(buildClientMethod()); builder.addMethod(initializeServiceClientConfigMethod()); @@ -124,15 +132,15 @@ private MethodSpec endpointProviderMethod() { private MethodSpec buildClientMethod() { MethodSpec.Builder builder = MethodSpec.methodBuilder("buildClient") - .addAnnotation(Override.class) - .addModifiers(Modifier.PROTECTED, Modifier.FINAL) - .returns(clientInterfaceName) - .addStatement("$T clientConfiguration = super.asyncClientConfiguration()", - SdkClientConfiguration.class).addStatement("this.validateClientOptions" - + "(clientConfiguration)") - .addStatement("$T serviceClientConfiguration = initializeServiceClientConfig" - + "(clientConfiguration)", - serviceConfigClassName); + .addAnnotation(Override.class) + .addModifiers(Modifier.PROTECTED, Modifier.FINAL) + .returns(clientInterfaceName) + .addStatement("$T clientConfiguration = super.asyncClientConfiguration()", + SdkClientConfiguration.class) + .addStatement("this.validateClientOptions(clientConfiguration)") + .addStatement("$T serviceClientConfiguration = initializeServiceClientConfig" + + "(clientConfiguration)", + serviceConfigClassName); builder.addStatement("$1T client = new $2T(serviceClientConfiguration, clientConfiguration)", clientInterfaceName, clientClassName); @@ -156,6 +164,32 @@ private MethodSpec bearerTokenProviderMethod() { .build(); } + private MethodSpec multipartEnabledMethod(MultipartCustomization multipartCustomization) { + return 
MethodSpec.methodBuilder("multipartEnabled") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .returns(builderInterfaceName) + .addParameter(Boolean.class, "enabled") + .addStatement("clientContextParams.put($N, enabled)", + multipartCustomization.getContextParamEnabledKey()) + .addStatement("return this") + .build(); + } + + private MethodSpec multipartConfigMethods(MultipartCustomization multipartCustomization) { + ClassName mulitpartConfigClassName = + PoetUtils.classNameFromFqcn(multipartCustomization.getMultipartConfigurationClass()); + return MethodSpec.methodBuilder("multipartConfiguration") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .addParameter(ParameterSpec.builder(mulitpartConfigClassName, "multipartConfig").build()) + .returns(builderInterfaceName) + .addStatement("clientContextParams.put($N, multipartConfig)", + multipartCustomization.getContextParamConfigKey()) + .addStatement("return this") + .build(); + } + private MethodSpec initializeServiceClientConfigMethod() { return MethodSpec.methodBuilder("initializeServiceClientConfig").addModifiers(Modifier.PRIVATE) .addParameter(SdkClientConfiguration.class, "clientConfig") diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderInterface.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderInterface.java index 5348972b5df..df62f97ae7c 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderInterface.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/AsyncClientBuilderInterface.java @@ -17,34 +17,97 @@ import com.squareup.javapoet.ClassName; import com.squareup.javapoet.CodeBlock; +import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.ParameterSpec; import com.squareup.javapoet.ParameterizedTypeName; import com.squareup.javapoet.TypeSpec; +import java.util.function.Consumer; +import 
javax.lang.model.element.Modifier; import software.amazon.awssdk.awscore.client.builder.AwsAsyncClientBuilder; +import software.amazon.awssdk.codegen.model.config.customization.MultipartCustomization; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.poet.ClassSpec; import software.amazon.awssdk.codegen.poet.PoetUtils; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; public class AsyncClientBuilderInterface implements ClassSpec { + private static final Logger log = Logger.loggerFor(AsyncClientBuilderInterface.class); + private final ClassName builderInterfaceName; private final ClassName clientInterfaceName; private final ClassName baseBuilderInterfaceName; + private final IntermediateModel model; public AsyncClientBuilderInterface(IntermediateModel model) { String basePackage = model.getMetadata().getFullClientPackageName(); this.clientInterfaceName = ClassName.get(basePackage, model.getMetadata().getAsyncInterface()); this.builderInterfaceName = ClassName.get(basePackage, model.getMetadata().getAsyncBuilderInterface()); this.baseBuilderInterfaceName = ClassName.get(basePackage, model.getMetadata().getBaseBuilderInterface()); + this.model = model; } @Override public TypeSpec poetSpec() { - return PoetUtils.createInterfaceBuilder(builderInterfaceName) - .addSuperinterface(ParameterizedTypeName.get(ClassName.get(AwsAsyncClientBuilder.class), - builderInterfaceName, clientInterfaceName)) - .addSuperinterface(ParameterizedTypeName.get(baseBuilderInterfaceName, - builderInterfaceName, clientInterfaceName)) - .addJavadoc(getJavadoc()) - .build(); + TypeSpec.Builder builder = PoetUtils + .createInterfaceBuilder(builderInterfaceName) + .addSuperinterface(ParameterizedTypeName.get(ClassName.get(AwsAsyncClientBuilder.class), + builderInterfaceName, clientInterfaceName)) + .addSuperinterface(ParameterizedTypeName.get(baseBuilderInterfaceName, + builderInterfaceName, 
clientInterfaceName)) + .addJavadoc(getJavadoc()); + + MultipartCustomization multipartCustomization = model.getCustomizationConfig().getMultipartCustomization(); + if (multipartCustomization != null) { + includeMultipartMethod(builder, multipartCustomization); + } + return builder.build(); + } + + private void includeMultipartMethod(TypeSpec.Builder builder, MultipartCustomization multipartCustomization) { + log.debug(() -> String.format("Adding multipart config methods to builder interface for service '%s'", + model.getMetadata().getServiceId())); + + // .multipartEnabled(Boolean) + builder.addMethod( + MethodSpec.methodBuilder("multipartEnabled") + .addModifiers(Modifier.DEFAULT, Modifier.PUBLIC) + .returns(builderInterfaceName) + .addParameter(Boolean.class, "enabled") + .addCode("throw new $T();", UnsupportedOperationException.class) + .addJavadoc(CodeBlock.of(multipartCustomization.getMultipartEnableMethodDoc())) + .build()); + + // .multipartConfiguration(MultipartConfiguration) + String multiPartConfigMethodName = "multipartConfiguration"; + String multipartConfigClass = Validate.notNull(multipartCustomization.getMultipartConfigurationClass(), + "'multipartConfigurationClass' must be defined"); + ClassName mulitpartConfigClassName = PoetUtils.classNameFromFqcn(multipartConfigClass); + builder.addMethod( + MethodSpec.methodBuilder(multiPartConfigMethodName) + .addModifiers(Modifier.DEFAULT, Modifier.PUBLIC) + .returns(builderInterfaceName) + .addParameter(ParameterSpec.builder(mulitpartConfigClassName, "multipartConfiguration").build()) + .addCode("throw new $T();", UnsupportedOperationException.class) + .addJavadoc(CodeBlock.of(multipartCustomization.getMultipartConfigMethodDoc())) + .build()); + + // .multipartConfiguration(Consumer) + ClassName mulitpartConfigBuilderClassName = PoetUtils.classNameFromFqcn(multipartConfigClass + ".Builder"); + ParameterizedTypeName consumerBuilderType = ParameterizedTypeName.get(ClassName.get(Consumer.class), + 
mulitpartConfigBuilderClassName); + builder.addMethod( + MethodSpec.methodBuilder(multiPartConfigMethodName) + .addModifiers(Modifier.DEFAULT, Modifier.PUBLIC) + .returns(builderInterfaceName) + .addParameter(ParameterSpec.builder(consumerBuilderType, "multipartConfiguration").build()) + .addStatement("$T builder = $T.builder()", + mulitpartConfigBuilderClassName, + mulitpartConfigClassName) + .addStatement("multipartConfiguration.accept(builder)") + .addStatement("return multipartConfiguration(builder.build())") + .addJavadoc(CodeBlock.of(multipartCustomization.getMultipartConfigMethodDoc())) + .build()); } @Override diff --git a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource index 2018b804f3d..4e0c6f1232f 100644 --- a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource +++ b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource @@ -8,7 +8,7 @@ "supportsDualStack" : true, "supportsFIPS" : true }, - "regionRegex" : "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", "regions" : { "af-south-1" : { "description" : "Africa (Cape Town)" @@ -73,6 +73,9 @@ "eu-west-3" : { "description" : "Europe (Paris)" }, + "il-central-1" : { + "description" : "Israel (Tel Aviv)" + }, "me-central-1" : { "description" : "Middle East (UAE)" }, diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index 922d74d8a80..38e32d10232 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/arns/pom.xml b/core/arns/pom.xml index 3d611a7e77b..802774018d6 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git 
a/core/auth-crt/pom.xml b/core/auth-crt/pom.xml index 4c9fd1c4bab..91dc602039b 100644 --- a/core/auth-crt/pom.xml +++ b/core/auth-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT auth-crt diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 3e8378cc08d..2e8581589b6 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT auth diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index 3c87ce53cf3..f1911753386 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT aws-core diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponseMetadata.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponseMetadata.java index f9e326f6231..2bbdec695da 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponseMetadata.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsResponseMetadata.java @@ -18,6 +18,7 @@ import static software.amazon.awssdk.awscore.util.AwsHeader.AWS_REQUEST_ID; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -48,7 +49,7 @@ protected AwsResponseMetadata(Map metadata) { } protected AwsResponseMetadata(AwsResponseMetadata responseMetadata) { - this(responseMetadata.metadata); + this(responseMetadata == null ? 
new HashMap<>() : responseMetadata.metadata); } /** diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsServiceClientConfiguration.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsServiceClientConfiguration.java index 97a20a1e31d..50aadac858b 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsServiceClientConfiguration.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsServiceClientConfiguration.java @@ -69,21 +69,31 @@ public interface Builder extends SdkServiceClientConfiguration.Builder { /** * Return the region */ - Region region(); + default Region region() { + throw new UnsupportedOperationException(); + } /** * Configure the region */ - Builder region(Region region); + default Builder region(Region region) { + throw new UnsupportedOperationException(); + } @Override - Builder overrideConfiguration(ClientOverrideConfiguration clientOverrideConfiguration); + default Builder overrideConfiguration(ClientOverrideConfiguration clientOverrideConfiguration) { + throw new UnsupportedOperationException(); + } @Override - Builder endpointOverride(URI endpointOverride); + default Builder endpointOverride(URI endpointOverride) { + throw new UnsupportedOperationException(); + } @Override - Builder endpointProvider(EndpointProvider endpointProvider); + default Builder endpointProvider(EndpointProvider endpointProvider) { + throw new UnsupportedOperationException(); + } @Override AwsServiceClientConfiguration build(); diff --git a/core/crt-core/pom.xml b/core/crt-core/pom.xml index d2cf2abb821..626813252a3 100644 --- a/core/crt-core/pom.xml +++ b/core/crt-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT crt-core diff --git a/core/endpoints-spi/pom.xml b/core/endpoints-spi/pom.xml index 0e98eed5473..d45d7b52509 100644 --- a/core/endpoints-spi/pom.xml +++ b/core/endpoints-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 
2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/imds/pom.xml b/core/imds/pom.xml index 88897ead86a..3ec164ba79d 100644 --- a/core/imds/pom.xml +++ b/core/imds/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 imds diff --git a/core/json-utils/pom.xml b/core/json-utils/pom.xml index d444501f81d..e262f9de0a9 100644 --- a/core/json-utils/pom.xml +++ b/core/json-utils/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index 04586a8efb7..2378f2e38ee 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index 59f58788790..342f3cf9395 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index 5da20ac4697..71fed9dd446 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT profiles diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index cbea90521e2..5b36c168871 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index 74cfbdc42b4..e934c54da81 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-query-protocol/pom.xml 
b/core/protocols/aws-query-protocol/pom.xml index 8386023f986..84195df0201 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index 69236c452d4..c1b5b3ef582 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index 007bf624e1b..f535b05c9b5 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index 215c3093a99..e29093bd76f 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index cda4a6cfa13..a87ad3caaba 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT regions diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 5062218fc05..a686c926e99 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -21,7 +21,7 @@ "dnsSuffix" : "amazonaws.com", "partition" : "aws", "partitionName" : "AWS Standard", - "regionRegex" : 
"^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", "regions" : { "af-south-1" : { "description" : "Africa (Cape Town)" @@ -83,6 +83,9 @@ "eu-west-3" : { "description" : "Europe (Paris)" }, + "il-central-1" : { + "description" : "Israel (Tel Aviv)" + }, "me-central-1" : { "description" : "Middle East (UAE)" }, @@ -173,6 +176,7 @@ "deprecated" : true, "hostname" : "access-analyzer-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -248,6 +252,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -796,6 +801,12 @@ "deprecated" : true, "hostname" : "ecr-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "api.ecr.il-central-1.amazonaws.com" + }, "me-central-1" : { "credentialScope" : { "region" : "me-central-1" @@ -1086,6 +1097,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1294,6 +1306,7 @@ "deprecated" : true, "hostname" : "apigateway-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1359,6 +1372,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1390,6 +1404,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1444,6 +1459,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1461,16 +1477,20 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, 
"ap-southeast-2" : { }, "ap-southeast-3" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -1855,6 +1875,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -1902,6 +1923,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "athena.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ap-southeast-1" : { "variants" : [ { "hostname" : "athena.ap-southeast-1.api.aws", @@ -1920,6 +1947,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "athena.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "athena.ca-central-1.api.aws", @@ -1932,6 +1965,12 @@ "tags" : [ "dualstack" ] } ] }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "athena.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "eu-north-1" : { "variants" : [ { "hostname" : "athena.eu-north-1.api.aws", @@ -1944,6 +1983,12 @@ "tags" : [ "dualstack" ] } ] }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "athena.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "eu-west-1" : { "variants" : [ { "hostname" : "athena.eu-west-1.api.aws", @@ -2099,6 +2144,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2547,6 +2593,7 @@ "deprecated" : true, "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2611,6 +2658,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" 
: { }, "sa-east-1" : { }, @@ -2710,6 +2758,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2783,6 +2832,7 @@ "deprecated" : true, "hostname" : "cloudtrail-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2877,6 +2927,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -3060,6 +3111,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -3713,6 +3765,7 @@ "deprecated" : true, "hostname" : "config-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -3817,6 +3870,8 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, @@ -4529,6 +4584,7 @@ "deprecated" : true, "hostname" : "directconnect-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -4577,6 +4633,7 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, @@ -4638,6 +4695,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -4791,16 +4849,21 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : 
{ }, "eu-west-3" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -4871,6 +4934,7 @@ "deprecated" : true, "hostname" : "ds-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -4937,6 +5001,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "local" : { "credentialScope" : { "region" : "us-east-1" @@ -5063,6 +5128,7 @@ "deprecated" : true, "hostname" : "ebs-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -5167,6 +5233,7 @@ "deprecated" : true, "hostname" : "ec2-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { @@ -5260,6 +5327,7 @@ "deprecated" : true, "hostname" : "ecs-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -5356,6 +5424,7 @@ "deprecated" : true, "hostname" : "fips.eks.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -5414,6 +5483,7 @@ "deprecated" : true, "hostname" : "elasticache-fips.us-west-1.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -5517,6 +5587,7 @@ "deprecated" : true, "hostname" : "elasticbeanstalk-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -5807,6 +5878,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.eu-west-3.amazonaws.com" }, + "fips-il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.il-central-1.amazonaws.com" + }, "fips-me-central-1" : { "credentialScope" : { "region" : "me-central-1" @@ -5856,6 +5934,12 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.us-west-2.amazonaws.com" }, + 
"il-central-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.il-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "me-central-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.me-central-1.amazonaws.com", @@ -5953,6 +6037,7 @@ "deprecated" : true, "hostname" : "elasticloadbalancing-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6050,6 +6135,7 @@ "deprecated" : true, "hostname" : "elasticmapreduce-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6143,6 +6229,7 @@ }, "emr-containers" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, @@ -6157,6 +6244,7 @@ }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -6344,6 +6432,7 @@ "deprecated" : true, "hostname" : "es-fips.us-west-1.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6451,6 +6540,7 @@ "deprecated" : true, "hostname" : "events-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6579,6 +6669,7 @@ "deprecated" : true, "hostname" : "firehose-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -7301,9 +7392,11 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -7341,6 +7434,7 @@ "deprecated" : true, "hostname" : "glue-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -7747,6 +7841,7 @@ "identitystore" : { "endpoints" : { "af-south-1" : 
{ }, + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -7761,9 +7856,11 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -7974,7 +8071,11 @@ "hostname" : "internetmonitor.ap-southeast-4.api.aws" }, "ca-central-1" : { - "hostname" : "internetmonitor.ca-central-1.api.aws" + "hostname" : "internetmonitor.ca-central-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor-fips.ca-central-1.api.aws", + "tags" : [ "fips" ] + } ] }, "eu-central-1" : { "hostname" : "internetmonitor.eu-central-1.api.aws" @@ -8000,6 +8101,9 @@ "eu-west-3" : { "hostname" : "internetmonitor.eu-west-3.api.aws" }, + "il-central-1" : { + "hostname" : "internetmonitor.il-central-1.api.aws" + }, "me-central-1" : { "hostname" : "internetmonitor.me-central-1.api.aws" }, @@ -8010,16 +8114,32 @@ "hostname" : "internetmonitor.sa-east-1.api.aws" }, "us-east-1" : { - "hostname" : "internetmonitor.us-east-1.api.aws" + "hostname" : "internetmonitor.us-east-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor-fips.us-east-1.api.aws", + "tags" : [ "fips" ] + } ] }, "us-east-2" : { - "hostname" : "internetmonitor.us-east-2.api.aws" + "hostname" : "internetmonitor.us-east-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor-fips.us-east-2.api.aws", + "tags" : [ "fips" ] + } ] }, "us-west-1" : { - "hostname" : "internetmonitor.us-west-1.api.aws" + "hostname" : "internetmonitor.us-west-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor-fips.us-west-1.api.aws", + "tags" : [ "fips" ] + } ] }, "us-west-2" : { - "hostname" : "internetmonitor.us-west-2.api.aws" + "hostname" : "internetmonitor.us-west-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor-fips.us-west-2.api.aws", + "tags" : [ "fips" ] + } ] } } }, @@ -8891,6 +9011,9 @@ "eu-west-3" : { "hostname" : 
"kendra-ranking.eu-west-3.api.aws" }, + "il-central-1" : { + "hostname" : "kendra-ranking.il-central-1.api.aws" + }, "me-central-1" : { "hostname" : "kendra-ranking.me-central-1.api.aws" }, @@ -8976,6 +9099,7 @@ "deprecated" : true, "hostname" : "kinesis-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -9027,6 +9151,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -9325,10 +9450,17 @@ "deprecated" : true, "hostname" : "kms-fips.eu-west-3.amazonaws.com" }, + "il-central-1" : { + "variants" : [ { + "hostname" : "kms-fips.il-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "il-central-1-fips" : { "credentialScope" : { "region" : "il-central-1" }, + "deprecated" : true, "hostname" : "kms-fips.il-central-1.amazonaws.com" }, "me-central-1" : { @@ -9432,9 +9564,11 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -9472,6 +9606,7 @@ "deprecated" : true, "hostname" : "lakeformation-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -9651,6 +9786,12 @@ "deprecated" : true, "hostname" : "lambda-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "variants" : [ { + "hostname" : "lambda.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "me-central-1" : { "variants" : [ { "hostname" : "lambda.me-central-1.api.aws", @@ -9831,6 +9972,7 @@ "deprecated" : true, "hostname" : "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -10001,6 +10143,7 @@ "deprecated" : true, "hostname" : 
"logs-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -10607,6 +10750,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -10638,6 +10782,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -10877,6 +11022,7 @@ "deprecated" : true, "hostname" : "monitoring-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -11246,6 +11392,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -11683,6 +11830,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -12232,6 +12380,7 @@ "deprecated" : true, "hostname" : "ram-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -12323,6 +12472,7 @@ "deprecated" : true, "hostname" : "rbin-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -12386,6 +12536,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "rds-fips.ca-central-1" : { @@ -12657,6 +12808,7 @@ "deprecated" : true, "hostname" : "redshift-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -12946,6 +13098,9 @@ "eu-west-3" : { "hostname" : "resource-explorer-2.eu-west-3.api.aws" }, + "il-central-1" : { + "hostname" : "resource-explorer-2.il-central-1.api.aws" + }, "sa-east-1" : { "hostname" : "resource-explorer-2.sa-east-1.api.aws" }, @@ -13013,6 +13168,7 @@ "deprecated" : true, 
"hostname" : "resource-groups-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -13142,6 +13298,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -13253,6 +13410,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -13501,6 +13659,12 @@ "deprecated" : true, "hostname" : "s3-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "variants" : [ { + "hostname" : "s3.dualstack.il-central-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, "me-central-1" : { "variants" : [ { "hostname" : "s3.dualstack.me-central-1.amazonaws.com", @@ -14042,6 +14206,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -14149,6 +14314,7 @@ "deprecated" : true, "hostname" : "securityhub-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -14278,6 +14444,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -14343,9 +14510,11 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", @@ -14353,8 +14522,10 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -14426,67 +14597,67 @@ "endpoints" : { "af-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.af-south-1.amazonaws.com", + "hostname" : 
"servicediscovery.af-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-east-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-east-1.amazonaws.com", + "hostname" : "servicediscovery.ap-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-northeast-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-northeast-1.amazonaws.com", + "hostname" : "servicediscovery.ap-northeast-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-northeast-2" : { "variants" : [ { - "hostname" : "servicediscovery.ap-northeast-2.amazonaws.com", + "hostname" : "servicediscovery.ap-northeast-2.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-northeast-3" : { "variants" : [ { - "hostname" : "servicediscovery.ap-northeast-3.amazonaws.com", + "hostname" : "servicediscovery.ap-northeast-3.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-south-1.amazonaws.com", + "hostname" : "servicediscovery.ap-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-south-2" : { "variants" : [ { - "hostname" : "servicediscovery.ap-south-2.amazonaws.com", + "hostname" : "servicediscovery.ap-south-2.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-1" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-1.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-2" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-2.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-2.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-3" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-3.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-3.api.aws", "tags" : [ "dualstack" ] } ] }, "ap-southeast-4" : { "variants" : [ { - "hostname" : "servicediscovery.ap-southeast-4.amazonaws.com", + "hostname" : "servicediscovery.ap-southeast-4.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14495,7 +14666,10 @@ "hostname" : 
"servicediscovery-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.ca-central-1.amazonaws.com", + "hostname" : "servicediscovery-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14508,67 +14682,73 @@ }, "eu-central-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-central-1.amazonaws.com", + "hostname" : "servicediscovery.eu-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-central-2" : { "variants" : [ { - "hostname" : "servicediscovery.eu-central-2.amazonaws.com", + "hostname" : "servicediscovery.eu-central-2.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-north-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-north-1.amazonaws.com", + "hostname" : "servicediscovery.eu-north-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-south-1.amazonaws.com", + "hostname" : "servicediscovery.eu-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-south-2" : { "variants" : [ { - "hostname" : "servicediscovery.eu-south-2.amazonaws.com", + "hostname" : "servicediscovery.eu-south-2.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-west-1" : { "variants" : [ { - "hostname" : "servicediscovery.eu-west-1.amazonaws.com", + "hostname" : "servicediscovery.eu-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-west-2" : { "variants" : [ { - "hostname" : "servicediscovery.eu-west-2.amazonaws.com", + "hostname" : "servicediscovery.eu-west-2.api.aws", "tags" : [ "dualstack" ] } ] }, "eu-west-3" : { "variants" : [ { - "hostname" : "servicediscovery.eu-west-3.amazonaws.com", + "hostname" : "servicediscovery.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "il-central-1" : { + "variants" : [ { + "hostname" : "servicediscovery.il-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "me-central-1" : { "variants" : [ { - "hostname" : 
"servicediscovery.me-central-1.amazonaws.com", + "hostname" : "servicediscovery.me-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "me-south-1" : { "variants" : [ { - "hostname" : "servicediscovery.me-south-1.amazonaws.com", + "hostname" : "servicediscovery.me-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, "sa-east-1" : { "variants" : [ { - "hostname" : "servicediscovery.sa-east-1.amazonaws.com", + "hostname" : "servicediscovery.sa-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14577,7 +14757,10 @@ "hostname" : "servicediscovery-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-east-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14593,7 +14776,10 @@ "hostname" : "servicediscovery-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-east-2.amazonaws.com", + "hostname" : "servicediscovery-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-east-2.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14609,7 +14795,10 @@ "hostname" : "servicediscovery-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-west-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14625,7 +14814,10 @@ "hostname" : "servicediscovery-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-west-2.amazonaws.com", + "hostname" : "servicediscovery-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-west-2.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -14836,41 +15028,6 @@ }, "sms" : { "endpoints" : { - "af-south-1" : { }, - 
"ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "fips-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, - "hostname" : "sms-fips.us-east-1.amazonaws.com" - }, - "fips-us-east-2" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "deprecated" : true, - "hostname" : "sms-fips.us-east-2.amazonaws.com" - }, - "fips-us-west-1" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "deprecated" : true, - "hostname" : "sms-fips.us-west-1.amazonaws.com" - }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -14878,26 +15035,6 @@ "deprecated" : true, "hostname" : "sms-fips.us-west-2.amazonaws.com" }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { - "variants" : [ { - "hostname" : "sms-fips.us-east-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-east-2" : { - "variants" : [ { - "hostname" : "sms-fips.us-east-2.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-west-1" : { - "variants" : [ { - "hostname" : "sms-fips.us-west-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, "us-west-2" : { "variants" : [ { "hostname" : "sms-fips.us-west-2.amazonaws.com", @@ -15227,6 +15364,7 @@ "deprecated" : true, "hostname" : "sns-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -15310,6 +15448,7 @@ "deprecated" : true, "hostname" : "sqs-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -15402,6 +15541,7 @@ "deprecated" : true, "hostname" : "ssm-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -15741,6 +15881,7 @@ "deprecated" : 
true, "hostname" : "states-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -15889,6 +16030,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "local" : { "credentialScope" : { "region" : "us-east-1" @@ -15933,6 +16075,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -16059,6 +16202,7 @@ "deprecated" : true, "hostname" : "swf-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -16138,6 +16282,7 @@ "deprecated" : true, "hostname" : "synthetics-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -16189,6 +16334,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -16275,6 +16421,15 @@ } } }, + "tnb" : { + "endpoints" : { + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-3" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "transcribe" : { "defaults" : { "protocols" : [ "https" ], @@ -16734,6 +16889,7 @@ "ap-northeast-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -17126,6 +17282,7 @@ "credentialScope" : { "region" : "il-central-1" }, + "deprecated" : true, "hostname" : "waf-regional-fips.il-central-1.amazonaws.com" }, "fips-me-central-1" : { @@ -17177,6 +17334,16 @@ "deprecated" : true, "hostname" : "waf-regional-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "waf-regional.il-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "waf-regional-fips.il-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "me-central-1" : { 
"credentialScope" : { "region" : "me-central-1" @@ -17595,6 +17762,7 @@ "credentialScope" : { "region" : "il-central-1" }, + "deprecated" : true, "hostname" : "wafv2-fips.il-central-1.amazonaws.com" }, "fips-me-central-1" : { @@ -17646,6 +17814,16 @@ "deprecated" : true, "hostname" : "wafv2-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "credentialScope" : { + "region" : "il-central-1" + }, + "hostname" : "wafv2.il-central-1.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.il-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "me-central-1" : { "credentialScope" : { "region" : "me-central-1" @@ -17919,6 +18097,7 @@ "deprecated" : true, "hostname" : "xray-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -18094,6 +18273,12 @@ "cn-northwest-1" : { } } }, + "arc-zonal-shift" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "athena" : { "endpoints" : { "cn-north-1" : { @@ -18942,15 +19127,20 @@ }, "savingsplans" : { "endpoints" : { - "aws-cn" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "savingsplans.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { "credentialScope" : { "region" : "cn-northwest-1" }, "hostname" : "savingsplans.cn-northwest-1.amazonaws.com.cn" } }, - "isRegionalized" : false, - "partitionEndpoint" : "aws-cn" + "isRegionalized" : true }, "secretsmanager" : { "endpoints" : { @@ -18987,13 +19177,13 @@ "endpoints" : { "cn-north-1" : { "variants" : [ { - "hostname" : "servicediscovery.cn-north-1.amazonaws.com.cn", + "hostname" : "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", "tags" : [ "dualstack" ] } ] }, "cn-northwest-1" : { "variants" : [ { - "hostname" : "servicediscovery.cn-northwest-1.amazonaws.com.cn", + "hostname" : "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", "tags" : [ "dualstack" ] } ] } @@ -19016,8 +19206,7 @@ }, "sms" : { 
"endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { } } }, "snowball" : { @@ -21813,6 +22002,12 @@ } } }, + "license-manager-linux-subscriptions" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "logs" : { "endpoints" : { "fips-us-gov-east-1" : { @@ -22870,6 +23065,9 @@ }, "us-gov-east-1" : { "variants" : [ { + "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] }, { @@ -22886,6 +23084,9 @@ }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] }, { @@ -22947,13 +23148,6 @@ }, "sms" : { "endpoints" : { - "fips-us-gov-east-1" : { - "credentialScope" : { - "region" : "us-gov-east-1" - }, - "deprecated" : true, - "hostname" : "sms-fips.us-gov-east-1.amazonaws.com" - }, "fips-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" @@ -22961,12 +23155,6 @@ "deprecated" : true, "hostname" : "sms-fips.us-gov-west-1.amazonaws.com" }, - "us-gov-east-1" : { - "variants" : [ { - "hostname" : "sms-fips.us-gov-east-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, "us-gov-west-1" : { "variants" : [ { "hostname" : "sms-fips.us-gov-west-1.amazonaws.com", @@ -24072,11 +24260,24 @@ "deprecated" : true, "hostname" : "rbin-fips.us-iso-east-1.c2s.ic.gov" }, + "fips-us-iso-west-1" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "deprecated" : true, + "hostname" : "rbin-fips.us-iso-west-1.c2s.ic.gov" + }, "us-iso-east-1" : { "variants" : [ { "hostname" : "rbin-fips.us-iso-east-1.c2s.ic.gov", "tags" : [ "fips" ] } ] + }, + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "rbin-fips.us-iso-west-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] } } }, diff --git a/core/sdk-core/pom.xml 
b/core/sdk-core/pom.xml index 8a6bc502638..5deffe013c6 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sdk-core AWS Java SDK :: SDK Core diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkServiceClientConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkServiceClientConfiguration.java index 8928ab52d3f..7d727d4069f 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkServiceClientConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkServiceClientConfiguration.java @@ -97,27 +97,39 @@ public interface Builder { /** * Return the client override configuration */ - ClientOverrideConfiguration overrideConfiguration(); + default ClientOverrideConfiguration overrideConfiguration() { + throw new UnsupportedOperationException(); + } /** * Return the endpoint override */ - URI endpointOverride(); + default URI endpointOverride() { + throw new UnsupportedOperationException(); + } - EndpointProvider endpointProvider(); + default EndpointProvider endpointProvider() { + throw new UnsupportedOperationException(); + } /** * Configure the client override configuration */ - Builder overrideConfiguration(ClientOverrideConfiguration clientOverrideConfiguration); + default Builder overrideConfiguration(ClientOverrideConfiguration clientOverrideConfiguration) { + throw new UnsupportedOperationException(); + } /** * Configure the endpoint override */ - Builder endpointOverride(URI endpointOverride); + default Builder endpointOverride(URI endpointOverride) { + throw new UnsupportedOperationException(); + } - Builder endpointProvider(EndpointProvider endpointProvider); + default Builder endpointProvider(EndpointProvider endpointProvider) { + throw new UnsupportedOperationException(); + } /** * Build the service client configuration using the configuration on this builder diff --git 
a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java index 07dea156808..4c7d70ab755 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java @@ -25,14 +25,17 @@ import java.util.Arrays; import java.util.Optional; import java.util.concurrent.ExecutorService; +import java.util.function.Consumer; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.internal.async.ByteBuffersAsyncRequestBody; import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; import software.amazon.awssdk.core.internal.async.InputStreamWithExecutorAsyncRequestBody; +import software.amazon.awssdk.core.internal.async.SplittingPublisher; import software.amazon.awssdk.core.internal.util.Mimetype; import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.Validate; /** * Interface to allow non-blocking streaming of request content. This follows the reactive streams pattern where this interface is @@ -399,4 +402,40 @@ static BlockingOutputStreamAsyncRequestBody forBlockingOutputStream(Long content static AsyncRequestBody empty() { return fromBytes(new byte[0]); } + + + /** + * Converts this {@link AsyncRequestBody} to a publisher of {@link AsyncRequestBody}s, each of which publishes a specific + * portion of the original data, based on the provided {@link AsyncRequestBodySplitConfiguration}. The default chunk size + * is 2MB and the default buffer size is 8MB. + * + *

+ * If content length of this {@link AsyncRequestBody} is present, each divided {@link AsyncRequestBody} is delivered to the + * subscriber right after it's initialized. + *

+ * If content length is null, it is sent after the entire content for that chunk is buffered. + * In this case, the configured {@code maxMemoryUsageInBytes} must be larger than or equal to {@code chunkSizeInBytes}. + * + * @see AsyncRequestBodySplitConfiguration + */ + default SdkPublisher split(AsyncRequestBodySplitConfiguration splitConfiguration) { + Validate.notNull(splitConfiguration, "splitConfiguration"); + + return SplittingPublisher.builder() + .asyncRequestBody(this) + .chunkSizeInBytes(splitConfiguration.chunkSizeInBytes()) + .bufferSizeInBytes(splitConfiguration.bufferSizeInBytes()) + .build(); + } + + /** + * This is a convenience method that passes an instance of the {@link AsyncRequestBodySplitConfiguration} builder, + * avoiding the need to create one manually via {@link AsyncRequestBodySplitConfiguration#builder()}. + * + * @see #split(AsyncRequestBodySplitConfiguration) + */ + default SdkPublisher split(Consumer splitConfiguration) { + Validate.notNull(splitConfiguration, "splitConfiguration"); + return split(AsyncRequestBodySplitConfiguration.builder().applyMutation(splitConfiguration).build()); + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java new file mode 100644 index 00000000000..fe51f33b4ff --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java @@ -0,0 +1,141 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.async; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * Configuration options for {@link AsyncRequestBody#split} to configure how the SDK + * should split an {@link SdkPublisher}. + */ +@SdkPublicApi +public final class AsyncRequestBodySplitConfiguration implements ToCopyableBuilder { + private final Long chunkSizeInBytes; + private final Long bufferSizeInBytes; + + private AsyncRequestBodySplitConfiguration(DefaultBuilder builder) { + this.chunkSizeInBytes = Validate.isPositiveOrNull(builder.chunkSizeInBytes, "chunkSizeInBytes"); + this.bufferSizeInBytes = Validate.isPositiveOrNull(builder.bufferSizeInBytes, "bufferSizeInBytes"); + } + + /** + * The configured chunk size for each divided {@link AsyncRequestBody}. + */ + public Long chunkSizeInBytes() { + return chunkSizeInBytes; + } + + /** + * The configured maximum buffer size the SDK will use to buffer the content from the source {@link SdkPublisher}. + */ + public Long bufferSizeInBytes() { + return bufferSizeInBytes; + } + + /** + * Create a {@link Builder}, used to create a {@link AsyncRequestBodySplitConfiguration}. 
+ */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AsyncRequestBodySplitConfiguration that = (AsyncRequestBodySplitConfiguration) o; + + if (!Objects.equals(chunkSizeInBytes, that.chunkSizeInBytes)) { + return false; + } + return Objects.equals(bufferSizeInBytes, that.bufferSizeInBytes); + } + + @Override + public int hashCode() { + int result = chunkSizeInBytes != null ? chunkSizeInBytes.hashCode() : 0; + result = 31 * result + (bufferSizeInBytes != null ? bufferSizeInBytes.hashCode() : 0); + return result; + } + + @Override + public AsyncRequestBodySplitConfiguration.Builder toBuilder() { + return new DefaultBuilder(this); + } + + public interface Builder extends CopyableBuilder { + + /** + * Configures the size for each divided chunk. The last chunk may be smaller than the configured size. The default value + * is 2MB. + * + * @param chunkSizeInBytes the chunk size in bytes + * @return This object for method chaining. + */ + Builder chunkSizeInBytes(Long chunkSizeInBytes); + + /** + * The maximum buffer size the SDK will use to buffer the content from the source {@link SdkPublisher}. The default value + * is 8MB. + * + * @param bufferSizeInBytes the buffer size in bytes + * @return This object for method chaining. 
+ */ + Builder bufferSizeInBytes(Long bufferSizeInBytes); + } + + private static final class DefaultBuilder implements Builder { + private Long chunkSizeInBytes; + private Long bufferSizeInBytes; + + private DefaultBuilder(AsyncRequestBodySplitConfiguration asyncRequestBodySplitConfiguration) { + this.chunkSizeInBytes = asyncRequestBodySplitConfiguration.chunkSizeInBytes; + this.bufferSizeInBytes = asyncRequestBodySplitConfiguration.bufferSizeInBytes; + } + + private DefaultBuilder() { + + } + + @Override + public Builder chunkSizeInBytes(Long chunkSizeInBytes) { + this.chunkSizeInBytes = chunkSizeInBytes; + return this; + } + + @Override + public Builder bufferSizeInBytes(Long bufferSizeInBytes) { + this.bufferSizeInBytes = bufferSizeInBytes; + return this; + } + + @Override + public AsyncRequestBodySplitConfiguration build() { + return new AsyncRequestBodySplitConfiguration(this); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java index 101aed7b744..146007927c6 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java @@ -239,7 +239,7 @@ private static final class SynchronousChunkBuffer { } private Iterable buffer(ByteBuffer bytes) { - return chunkBuffer.bufferAndCreateChunks(bytes); + return chunkBuffer.split(bytes); } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java index 93d6d09578a..c171b078767 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java +++ 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java @@ -19,10 +19,11 @@ import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicLong; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.SdkBuilder; @@ -31,17 +32,20 @@ */ @SdkInternalApi public final class ChunkBuffer { - private final AtomicLong remainingBytes; + private static final Logger log = Logger.loggerFor(ChunkBuffer.class); + private final AtomicLong transferredBytes; private final ByteBuffer currentBuffer; - private final int bufferSize; + private final int chunkSize; + private final long totalBytes; private ChunkBuffer(Long totalBytes, Integer bufferSize) { Validate.notNull(totalBytes, "The totalBytes must not be null"); int chunkSize = bufferSize != null ? bufferSize : DEFAULT_ASYNC_CHUNK_SIZE; - this.bufferSize = chunkSize; + this.chunkSize = chunkSize; this.currentBuffer = ByteBuffer.allocate(chunkSize); - this.remainingBytes = new AtomicLong(totalBytes); + this.totalBytes = totalBytes; + this.transferredBytes = new AtomicLong(0); } public static Builder builder() { @@ -49,52 +53,97 @@ public static Builder builder() { } - // currentBuffer and bufferedList can get over written if concurrent Threads calls this method at the same time. 
- public synchronized Iterable bufferAndCreateChunks(ByteBuffer buffer) { - int startPosition = 0; - List bufferedList = new ArrayList<>(); - int currentBytesRead = buffer.remaining(); - do { - int bufferedBytes = currentBuffer.position(); - int availableToRead = bufferSize - bufferedBytes; - int bytesToMove = Math.min(availableToRead, currentBytesRead - startPosition); + /** + * Split the input {@link ByteBuffer} into multiple smaller {@link ByteBuffer}s, each of which contains {@link #chunkSize} + * worth of bytes. If the last chunk of the input ByteBuffer contains less than {@link #chunkSize} data, the last chunk will + * be buffered. + */ + public synchronized Iterable split(ByteBuffer inputByteBuffer) { - byte[] bytes = BinaryUtils.copyAllBytesFrom(buffer); - if (bufferedBytes == 0) { - currentBuffer.put(bytes, startPosition, bytesToMove); - } else { - currentBuffer.put(bytes, 0, bytesToMove); + if (!inputByteBuffer.hasRemaining()) { + return Collections.singletonList(inputByteBuffer); + } + + List byteBuffers = new ArrayList<>(); + + // If current buffer is not empty, fill the buffer first. + if (currentBuffer.position() != 0) { + fillCurrentBuffer(inputByteBuffer); + + if (isCurrentBufferFull()) { + addCurrentBufferToIterable(byteBuffers, chunkSize); + } + } + + // If the input buffer is not empty, split the input buffer + if (inputByteBuffer.hasRemaining()) { + splitRemainingInputByteBuffer(inputByteBuffer, byteBuffers); + } + + // If this is the last chunk, add data buffered to the iterable + if (isLastChunk()) { + int remainingBytesInBuffer = currentBuffer.position(); + addCurrentBufferToIterable(byteBuffers, remainingBytesInBuffer); + } + return byteBuffers; + } + + private boolean isCurrentBufferFull() { + return currentBuffer.position() == chunkSize; + } + + /** + * Splits the input ByteBuffer to multiple chunks and add them to the iterable. 
+ */ + private void splitRemainingInputByteBuffer(ByteBuffer inputByteBuffer, List byteBuffers) { + while (inputByteBuffer.hasRemaining()) { + ByteBuffer inputByteBufferCopy = inputByteBuffer.asReadOnlyBuffer(); + if (inputByteBuffer.remaining() < chunkSize) { + currentBuffer.put(inputByteBuffer); + break; } - startPosition = startPosition + bytesToMove; - - // Send the data once the buffer is full - if (currentBuffer.position() == bufferSize) { - currentBuffer.position(0); - ByteBuffer bufferToSend = ByteBuffer.allocate(bufferSize); - bufferToSend.put(currentBuffer.array(), 0, bufferSize); - bufferToSend.clear(); - currentBuffer.clear(); - bufferedList.add(bufferToSend); - remainingBytes.addAndGet(-bufferSize); + int newLimit = inputByteBufferCopy.position() + chunkSize; + inputByteBufferCopy.limit(newLimit); + inputByteBuffer.position(newLimit); + byteBuffers.add(inputByteBufferCopy); + transferredBytes.addAndGet(chunkSize); + } + } + + private boolean isLastChunk() { + long remainingBytes = totalBytes - transferredBytes.get(); + return remainingBytes != 0 && remainingBytes == currentBuffer.position(); + } + + private void addCurrentBufferToIterable(List byteBuffers, int capacity) { + ByteBuffer bufferedChunk = ByteBuffer.allocate(capacity); + currentBuffer.flip(); + bufferedChunk.put(currentBuffer); + bufferedChunk.flip(); + byteBuffers.add(bufferedChunk); + transferredBytes.addAndGet(bufferedChunk.remaining()); + currentBuffer.clear(); + } + + private void fillCurrentBuffer(ByteBuffer inputByteBuffer) { + while (currentBuffer.position() < chunkSize) { + if (!inputByteBuffer.hasRemaining()) { + break; + } + + int remainingCapacity = chunkSize - currentBuffer.position(); + + if (inputByteBuffer.remaining() < remainingCapacity) { + currentBuffer.put(inputByteBuffer); + } else { + ByteBuffer remainingChunk = inputByteBuffer.asReadOnlyBuffer(); + int newLimit = inputByteBuffer.position() + remainingCapacity; + remainingChunk.limit(newLimit); + 
inputByteBuffer.position(newLimit); + currentBuffer.put(remainingChunk); } - } while (startPosition < currentBytesRead); - - int remainingBytesInBuffer = currentBuffer.position(); - - // Send the remaining buffer when - // 1. remainingBytes in buffer are same as the last few bytes to be read. - // 2. If it is a zero byte and the last byte to be read. - if (remainingBytes.get() == remainingBytesInBuffer && - (buffer.remaining() == 0 || remainingBytesInBuffer > 0)) { - currentBuffer.clear(); - ByteBuffer trimmedBuffer = ByteBuffer.allocate(remainingBytesInBuffer); - trimmedBuffer.put(currentBuffer.array(), 0, remainingBytesInBuffer); - trimmedBuffer.clear(); - bufferedList.add(trimmedBuffer); - remainingBytes.addAndGet(-remainingBytesInBuffer); } - return bufferedList; } public interface Builder extends SdkBuilder { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java new file mode 100644 index 00000000000..c56d1b6437d --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java @@ -0,0 +1,331 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import java.nio.ByteBuffer; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.exception.NonRetryableException; +import software.amazon.awssdk.core.internal.util.NoopSubscription; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.async.SimplePublisher; + +/** + * Splits an {@link AsyncRequestBody} to multiple smaller {@link AsyncRequestBody}s, each of which publishes a specific portion of + * the original data. + * + *

If content length is known, each {@link AsyncRequestBody} is sent to the subscriber right after it's initialized. + * Otherwise, it is sent after the entire content for that chunk is buffered. This is required to get content length. + */ +@SdkInternalApi +public class SplittingPublisher implements SdkPublisher { + private static final Logger log = Logger.loggerFor(SplittingPublisher.class); + private static final long DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024L; + private static final long DEFAULT_BUFFER_SIZE = DEFAULT_CHUNK_SIZE * 4; + private final AsyncRequestBody upstreamPublisher; + private final SplittingSubscriber splittingSubscriber; + private final SimplePublisher downstreamPublisher = new SimplePublisher<>(); + private final long chunkSizeInBytes; + private final long bufferSizeInBytes; + + private SplittingPublisher(Builder builder) { + this.upstreamPublisher = Validate.paramNotNull(builder.asyncRequestBody, "asyncRequestBody"); + this.chunkSizeInBytes = builder.chunkSizeInBytes == null ? DEFAULT_CHUNK_SIZE : builder.chunkSizeInBytes; + this.bufferSizeInBytes = builder.bufferSizeInBytes == null ? 
DEFAULT_BUFFER_SIZE : builder.bufferSizeInBytes; + this.splittingSubscriber = new SplittingSubscriber(upstreamPublisher.contentLength().orElse(null)); + + if (!upstreamPublisher.contentLength().isPresent()) { + Validate.isTrue(bufferSizeInBytes >= chunkSizeInBytes, + "bufferSizeInBytes must be larger than or equal to " + + "chunkSizeInBytes if the content length is unknown"); + } + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public void subscribe(Subscriber downstreamSubscriber) { + downstreamPublisher.subscribe(downstreamSubscriber); + upstreamPublisher.subscribe(splittingSubscriber); + } + + private class SplittingSubscriber implements Subscriber { + private Subscription upstreamSubscription; + private final Long upstreamSize; + private final AtomicInteger chunkNumber = new AtomicInteger(0); + private volatile DownstreamBody currentBody; + private final AtomicBoolean hasOpenUpstreamDemand = new AtomicBoolean(false); + private final AtomicLong dataBuffered = new AtomicLong(0); + + /** + * A hint to determine whether we will exceed maxMemoryUsage by the next OnNext call. + */ + private int byteBufferSizeHint; + private volatile boolean upstreamComplete; + + SplittingSubscriber(Long upstreamSize) { + this.upstreamSize = upstreamSize; + } + + @Override + public void onSubscribe(Subscription s) { + this.upstreamSubscription = s; + this.currentBody = + initializeNextDownstreamBody(upstreamSize != null, calculateChunkSize(upstreamSize), + chunkNumber.get()); + // We need to request subscription *after* we set currentBody because onNext could be invoked right away. 
+ upstreamSubscription.request(1); + } + + private DownstreamBody initializeNextDownstreamBody(boolean contentLengthKnown, long chunkSize, int chunkNumber) { + DownstreamBody body = new DownstreamBody(contentLengthKnown, chunkSize, chunkNumber); + if (contentLengthKnown) { + sendCurrentBody(body); + } + return body; + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + hasOpenUpstreamDemand.set(false); + byteBufferSizeHint = byteBuffer.remaining(); + + while (true) { + + if (!byteBuffer.hasRemaining()) { + break; + } + + int amountRemainingInChunk = amountRemainingInChunk(); + + // If we have fulfilled this chunk, + // complete the current body + if (amountRemainingInChunk == 0) { + completeCurrentBodyAndCreateNewIfNeeded(byteBuffer); + amountRemainingInChunk = amountRemainingInChunk(); + } + + // If the current ByteBuffer < this chunk, send it as-is + if (amountRemainingInChunk > byteBuffer.remaining()) { + currentBody.send(byteBuffer.duplicate()); + break; + } + + // If the current ByteBuffer == this chunk, send it as-is and + // complete the current body + if (amountRemainingInChunk == byteBuffer.remaining()) { + currentBody.send(byteBuffer.duplicate()); + completeCurrentBodyAndCreateNewIfNeeded(byteBuffer); + break; + } + + // If the current ByteBuffer > this chunk, split this ByteBuffer + ByteBuffer firstHalf = byteBuffer.duplicate(); + int newLimit = firstHalf.position() + amountRemainingInChunk; + firstHalf.limit(newLimit); + byteBuffer.position(newLimit); + currentBody.send(firstHalf); + } + + maybeRequestMoreUpstreamData(); + } + + private void completeCurrentBodyAndCreateNewIfNeeded(ByteBuffer byteBuffer) { + completeCurrentBody(); + int currentChunk = chunkNumber.incrementAndGet(); + boolean shouldCreateNewDownstreamRequestBody; + Long dataRemaining = totalDataRemaining(); + + if (upstreamSize == null) { + shouldCreateNewDownstreamRequestBody = !upstreamComplete || byteBuffer.hasRemaining(); + } else { + shouldCreateNewDownstreamRequestBody = 
dataRemaining != null && dataRemaining > 0; + } + + if (shouldCreateNewDownstreamRequestBody) { + long chunkSize = calculateChunkSize(dataRemaining); + currentBody = initializeNextDownstreamBody(upstreamSize != null, chunkSize, currentChunk); + } + } + + private int amountRemainingInChunk() { + return Math.toIntExact(currentBody.maxLength - currentBody.transferredLength); + } + + private void completeCurrentBody() { + log.debug(() -> "completeCurrentBody for chunk " + chunkNumber.get()); + currentBody.complete(); + if (upstreamSize == null) { + sendCurrentBody(currentBody); + } + } + + @Override + public void onComplete() { + upstreamComplete = true; + log.trace(() -> "Received onComplete()"); + completeCurrentBody(); + downstreamPublisher.complete(); + } + + @Override + public void onError(Throwable t) { + log.trace(() -> "Received onError()", t); + downstreamPublisher.error(t); + } + + private void sendCurrentBody(AsyncRequestBody body) { + downstreamPublisher.send(body).exceptionally(t -> { + downstreamPublisher.error(t); + return null; + }); + } + + private long calculateChunkSize(Long dataRemaining) { + // Use default chunk size if the content length is unknown + if (dataRemaining == null) { + return chunkSizeInBytes; + } + + return Math.min(chunkSizeInBytes, dataRemaining); + } + + private void maybeRequestMoreUpstreamData() { + long buffered = dataBuffered.get(); + if (shouldRequestMoreData(buffered) && + hasOpenUpstreamDemand.compareAndSet(false, true)) { + log.trace(() -> "Requesting more data, current data buffered: " + buffered); + upstreamSubscription.request(1); + } + } + + private boolean shouldRequestMoreData(long buffered) { + return buffered == 0 || buffered + byteBufferSizeHint <= bufferSizeInBytes; + } + + private Long totalDataRemaining() { + if (upstreamSize == null) { + return null; + } + return upstreamSize - (chunkNumber.get() * chunkSizeInBytes); + } + + private final class DownstreamBody implements AsyncRequestBody { + + /** + * The 
maximum length of the content this AsyncRequestBody can hold. If the upstream content length is known, this is + * the same as totalLength + */ + private final long maxLength; + private final Long totalLength; + private final SimplePublisher delegate = new SimplePublisher<>(); + private final int chunkNumber; + private final AtomicBoolean subscribeCalled = new AtomicBoolean(false); + private volatile long transferredLength = 0; + + private DownstreamBody(boolean contentLengthKnown, long maxLength, int chunkNumber) { + this.totalLength = contentLengthKnown ? maxLength : null; + this.maxLength = maxLength; + this.chunkNumber = chunkNumber; + } + + @Override + public Optional contentLength() { + return totalLength != null ? Optional.of(totalLength) : Optional.of(transferredLength); + } + + public void send(ByteBuffer data) { + log.trace(() -> String.format("Sending bytebuffer %s to chunk %d", data, chunkNumber)); + int length = data.remaining(); + transferredLength += length; + addDataBuffered(length); + delegate.send(data).whenComplete((r, t) -> { + addDataBuffered(-length); + if (t != null) { + error(t); + } + }); + } + + public void complete() { + log.debug(() -> "Received complete() for chunk number: " + chunkNumber + " length " + transferredLength); + delegate.complete().whenComplete((r, t) -> { + if (t != null) { + error(t); + } + }); + } + + public void error(Throwable error) { + delegate.error(error); + } + + @Override + public void subscribe(Subscriber s) { + if (subscribeCalled.compareAndSet(false, true)) { + delegate.subscribe(s); + } else { + s.onSubscribe(new NoopSubscription(s)); + s.onError(NonRetryableException.create( + "A retry was attempted, but AsyncRequestBody.split does not " + + "support retries.")); + } + } + + private void addDataBuffered(int length) { + dataBuffered.addAndGet(length); + if (length < 0) { + maybeRequestMoreUpstreamData(); + } + } + } + } + + public static final class Builder { + private AsyncRequestBody asyncRequestBody; + 
private Long chunkSizeInBytes; + private Long bufferSizeInBytes; + + public Builder asyncRequestBody(AsyncRequestBody asyncRequestBody) { + this.asyncRequestBody = asyncRequestBody; + return this; + } + + public Builder chunkSizeInBytes(Long chunkSizeInBytes) { + this.chunkSizeInBytes = chunkSizeInBytes; + return this; + } + + public Builder bufferSizeInBytes(Long bufferSizeInBytes) { + this.bufferSizeInBytes = bufferSizeInBytes; + return this; + } + + public SplittingPublisher build() { + return new SplittingPublisher(this); + } + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyConfigurationTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyConfigurationTest.java new file mode 100644 index 00000000000..8b8f78f2b5e --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyConfigurationTest.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.async; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +public class AsyncRequestBodyConfigurationTest { + + @Test + void equalsHashCode() { + EqualsVerifier.forClass(AsyncRequestBodySplitConfiguration.class) + .verify(); + } + + @ParameterizedTest + @ValueSource(longs = {0, -1}) + void nonPositiveValue_shouldThrowException(long size) { + assertThatThrownBy(() -> + AsyncRequestBodySplitConfiguration.builder() + .chunkSizeInBytes(size) + .build()) + .hasMessageContaining("must be positive"); + assertThatThrownBy(() -> + AsyncRequestBodySplitConfiguration.builder() + .bufferSizeInBytes(size) + .build()) + .hasMessageContaining("must be positive"); + } + + @Test + void toBuilder_shouldCopyAllFields() { + AsyncRequestBodySplitConfiguration config = AsyncRequestBodySplitConfiguration.builder() + .bufferSizeInBytes(1L) + .chunkSizeInBytes(2L) + .build(); + + assertThat(config.toBuilder().build()).isEqualTo(config); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java index aab643cbb6a..cdd87822e3d 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/AsyncRequestBodyTest.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.core.async; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; diff --git 
a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java index 8b73402dc46..a553a55a453 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java @@ -18,6 +18,8 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import java.io.ByteArrayInputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -29,8 +31,12 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.apache.commons.lang3.RandomStringUtils; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import software.amazon.awssdk.core.internal.async.ChunkBuffer; +import software.amazon.awssdk.utils.BinaryUtils; import software.amazon.awssdk.utils.StringUtils; class ChunkBufferTest { @@ -40,42 +46,38 @@ void builderWithNoTotalSize() { assertThatThrownBy(() -> ChunkBuffer.builder().build()).isInstanceOf(NullPointerException.class); } - @Test - void numberOfChunkMultipleOfTotalBytes() { - String inputString = StringUtils.repeat("*", 25); - - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(5).totalBytes(inputString.getBytes(StandardCharsets.UTF_8).length).build(); - Iterable byteBuffers = - chunkBuffer.bufferAndCreateChunks(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); - - AtomicInteger iteratedCounts = new AtomicInteger(); - byteBuffers.forEach(r -> { - iteratedCounts.getAndIncrement(); - assertThat(r.array()).isEqualTo(StringUtils.repeat("*", 5).getBytes(StandardCharsets.UTF_8)); - }); - 
assertThat(iteratedCounts.get()).isEqualTo(5); - } - - @Test - void numberOfChunk_Not_MultipleOfTotalBytes() { - int totalBytes = 23; + @ParameterizedTest + @ValueSource(ints = {1, 6, 10, 23, 25}) + void numberOfChunk_Not_MultipleOfTotalBytes(int totalBytes) { int bufferSize = 5; - String inputString = StringUtils.repeat("*", totalBytes); + String inputString = RandomStringUtils.randomAscii(totalBytes); ChunkBuffer chunkBuffer = ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(inputString.getBytes(StandardCharsets.UTF_8).length).build(); Iterable byteBuffers = - chunkBuffer.bufferAndCreateChunks(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); + chunkBuffer.split(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); + + AtomicInteger index = new AtomicInteger(0); + int count = (int) Math.ceil(totalBytes / (double) bufferSize); + int remainder = totalBytes % bufferSize; - AtomicInteger iteratedCounts = new AtomicInteger(); byteBuffers.forEach(r -> { - iteratedCounts.getAndIncrement(); - if (iteratedCounts.get() * bufferSize < totalBytes) { - assertThat(r.array()).isEqualTo(StringUtils.repeat("*", bufferSize).getBytes(StandardCharsets.UTF_8)); - } else { - assertThat(r.array()).isEqualTo(StringUtils.repeat("*", 3).getBytes(StandardCharsets.UTF_8)); + int i = index.get(); + try (ByteArrayInputStream inputStream = new ByteArrayInputStream(inputString.getBytes(StandardCharsets.UTF_8))) { + byte[] expected; + if (i == count - 1 && remainder != 0) { + expected = new byte[remainder]; + } else { + expected = new byte[bufferSize]; + } + inputStream.skip(i * bufferSize); + inputStream.read(expected); + byte[] actualBytes = BinaryUtils.copyBytesFrom(r); + assertThat(actualBytes).isEqualTo(expected); + index.incrementAndGet(); + } catch (IOException e) { + throw new RuntimeException(e); } }); } @@ -86,7 +88,7 @@ void zeroTotalBytesAsInput_returnsZeroByte() { ChunkBuffer chunkBuffer = 
ChunkBuffer.builder().bufferSize(5).totalBytes(zeroByte.length).build(); Iterable byteBuffers = - chunkBuffer.bufferAndCreateChunks(ByteBuffer.wrap(zeroByte)); + chunkBuffer.split(ByteBuffer.wrap(zeroByte)); AtomicInteger iteratedCounts = new AtomicInteger(); byteBuffers.forEach(r -> { @@ -104,16 +106,16 @@ void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { ChunkBuffer chunkBuffer = ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(wrap.remaining()).build(); Iterable byteBuffers = - chunkBuffer.bufferAndCreateChunks(wrap); + chunkBuffer.split(wrap); AtomicInteger iteratedCounts = new AtomicInteger(); byteBuffers.forEach(r -> { iteratedCounts.getAndIncrement(); if (iteratedCounts.get() * bufferSize < totalBytes) { // array of empty bytes - assertThat(r.array()).isEqualTo(ByteBuffer.allocate(bufferSize).array()); + assertThat(BinaryUtils.copyBytesFrom(r)).isEqualTo(ByteBuffer.allocate(bufferSize).array()); } else { - assertThat(r.array()).isEqualTo(ByteBuffer.allocate(totalBytes % bufferSize).array()); + assertThat(BinaryUtils.copyBytesFrom(r)).isEqualTo(ByteBuffer.allocate(totalBytes % bufferSize).array()); } }); assertThat(iteratedCounts.get()).isEqualTo(4); @@ -167,7 +169,7 @@ void concurrentTreads_calling_bufferAndCreateChunks() throws ExecutionException, futures = IntStream.range(0, threads).>mapToObj(t -> service.submit(() -> { String inputString = StringUtils.repeat(Integer.toString(counter.incrementAndGet()), totalBytes); - return chunkBuffer.bufferAndCreateChunks(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); + return chunkBuffer.split(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); })).collect(Collectors.toCollection(() -> new ArrayList<>(threads))); AtomicInteger filledBuffers = new AtomicInteger(0); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java index 90294bd2767..39abaffd8f7 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java @@ -28,6 +28,7 @@ import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; import io.reactivex.Flowable; +import org.apache.commons.lang3.RandomStringUtils; import org.assertj.core.util.Lists; import org.junit.Test; import org.junit.runner.RunWith; @@ -52,11 +53,26 @@ public class ChecksumCalculatingAsyncRequestBodyTest { "x-amz-checksum-crc32:i9aeUg==\r\n\r\n"; private final static Path path; + private final static ByteBuffer positionNonZeroBytebuffer; + + private final static ByteBuffer positionZeroBytebuffer; + static { + byte[] content = testString.getBytes(); + byte[] randomContent = RandomStringUtils.randomAscii(1024).getBytes(StandardCharsets.UTF_8); + positionNonZeroBytebuffer = ByteBuffer.allocate(content.length + randomContent.length); + positionNonZeroBytebuffer.put(randomContent) + .put(content); + positionNonZeroBytebuffer.position(randomContent.length); + + positionZeroBytebuffer = ByteBuffer.allocate(content.length); + positionZeroBytebuffer.put(content); + positionZeroBytebuffer.flip(); + FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); path = fs.getPath("./test"); try { - Files.write(path, testString.getBytes()); + Files.write(path, content); } catch (IOException e) { e.printStackTrace(); } @@ -71,16 +87,25 @@ public ChecksumCalculatingAsyncRequestBodyTest(AsyncRequestBody provider) { @Parameterized.Parameters public static AsyncRequestBody[] data() { AsyncRequestBody[] asyncRequestBodies = { - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromString(testString)) - 
.algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), - - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromFile(path)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), - }; + ChecksumCalculatingAsyncRequestBody.builder() + .asyncRequestBody(AsyncRequestBody.fromString(testString)) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32").build(), + + ChecksumCalculatingAsyncRequestBody.builder() + .asyncRequestBody(AsyncRequestBody.fromFile(path)) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32").build(), + + ChecksumCalculatingAsyncRequestBody.builder() + .asyncRequestBody(AsyncRequestBody.fromRemainingByteBuffer(positionZeroBytebuffer)) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32").build(), + ChecksumCalculatingAsyncRequestBody.builder() + .asyncRequestBody(AsyncRequestBody.fromRemainingByteBuffersUnsafe(positionNonZeroBytebuffer)) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32").build(), + }; return asyncRequestBodies; } @@ -120,30 +145,30 @@ public void onComplete() { @Test public void stringConstructorHasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromString("Hello world")) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32") - .build(); + .asyncRequestBody(AsyncRequestBody.fromString("Hello world")) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32") + .build(); assertThat(requestBody.contentType()).startsWith(Mimetype.MIMETYPE_TEXT_PLAIN); } @Test public void fileConstructorHasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromFile(path)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32") - .build(); + .asyncRequestBody(AsyncRequestBody.fromFile(path)) 
+ .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32") + .build(); assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); } @Test public void bytesArrayConstructorHasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromBytes("hello world".getBytes())) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32") - .build(); + .asyncRequestBody(AsyncRequestBody.fromBytes("hello world".getBytes())) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32") + .build(); assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); } @@ -151,20 +176,20 @@ public void bytesArrayConstructorHasCorrectContentType() { public void bytesBufferConstructorHasCorrectContentType() { ByteBuffer byteBuffer = ByteBuffer.wrap("hello world".getBytes()); AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromByteBuffer(byteBuffer)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32") - .build(); + .asyncRequestBody(AsyncRequestBody.fromByteBuffer(byteBuffer)) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32") + .build(); assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); } @Test public void emptyBytesConstructorHasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.empty()) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32") - .build(); + .asyncRequestBody(AsyncRequestBody.empty()) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32") + .build(); assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); } @@ -172,8 +197,8 @@ public void emptyBytesConstructorHasCorrectContentType() { public void publisherConstructorThrowsExceptionIfNoContentLength() { 
List requestBodyStrings = Lists.newArrayList("A", "B", "C"); List bodyBytes = requestBodyStrings.stream() - .map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) - .collect(Collectors.toList()); + .map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) + .collect(Collectors.toList()); Publisher bodyPublisher = Flowable.fromIterable(bodyBytes); ChecksumCalculatingAsyncRequestBody.Builder builder = ChecksumCalculatingAsyncRequestBody.builder() @@ -208,16 +233,16 @@ public void fromBytes_byteArrayNotNullChecksumSupplied() { byte[] original = {1, 2, 3, 4}; // Checksum data in byte format. byte[] expected = {52, 13, 10, - 1, 2, 3, 4, 13, 10, - 48, 13, 10, 120, 45, 97, 109, 122, 110, 45, 99, 104, 101, 99, 107, 115, - 117, 109, 45, 99, 114, 99, 51, 50, 58, 116, 106, 122, 55, 122, 81, 61, 61, 13, 10, 13, 10}; + 1, 2, 3, 4, 13, 10, + 48, 13, 10, 120, 45, 97, 109, 122, 110, 45, 99, 104, 101, 99, 107, 115, + 117, 109, 45, 99, 114, 99, 51, 50, 58, 116, 106, 122, 55, 122, 81, 61, 61, 13, 10, 13, 10}; byte[] toModify = new byte[original.length]; System.arraycopy(original, 0, toModify, 0, original.length); AsyncRequestBody body = ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromBytes(toModify)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amzn-checksum-crc32") - .build(); + .asyncRequestBody(AsyncRequestBody.fromBytes(toModify)) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amzn-checksum-crc32") + .build(); for (int i = 0; i < toModify.length; ++i) { toModify[i]++; } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java new file mode 100644 index 00000000000..0966ea6eb76 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java @@ -0,0 +1,279 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.async; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.utils.BinaryUtils; + +public class SplittingPublisherTest { + private static final int CHUNK_SIZE = 5; + + private static final int CONTENT_SIZE = 101; + private static final byte[] CONTENT = + 
RandomStringUtils.randomAscii(CONTENT_SIZE).getBytes(Charset.defaultCharset()); + + private static final int NUM_OF_CHUNK = (int) Math.ceil(CONTENT_SIZE / (double) CHUNK_SIZE); + + private static File testFile; + + @BeforeAll + public static void beforeAll() throws IOException { + testFile = File.createTempFile("SplittingPublisherTest", UUID.randomUUID().toString()); + Files.write(testFile.toPath(), CONTENT); + } + + @AfterAll + public static void afterAll() throws Exception { + testFile.delete(); + } + + @Test + public void split_contentUnknownMaxMemorySmallerThanChunkSize_shouldThrowException() { + AsyncRequestBody body = AsyncRequestBody.fromPublisher(s -> { + }); + assertThatThrownBy(() -> SplittingPublisher.builder() + .asyncRequestBody(body) + .chunkSizeInBytes(10L) + .bufferSizeInBytes(5L) + .build()) + .hasMessageContaining("must be larger than or equal"); + } + + @ParameterizedTest + @ValueSource(ints = {CHUNK_SIZE, CHUNK_SIZE * 2 - 1, CHUNK_SIZE * 2}) + void differentChunkSize_shouldSplitAsyncRequestBodyCorrectly(int chunkSize) throws Exception { + + FileAsyncRequestBody fileAsyncRequestBody = FileAsyncRequestBody.builder() + .path(testFile.toPath()) + .chunkSizeInBytes(chunkSize) + .build(); + verifySplitContent(fileAsyncRequestBody, chunkSize); + } + + @ParameterizedTest + @ValueSource(ints = {CHUNK_SIZE, CHUNK_SIZE * 2 - 1, CHUNK_SIZE * 2}) + void differentChunkSize_byteArrayShouldSplitAsyncRequestBodyCorrectly(int chunkSize) throws Exception { + verifySplitContent(AsyncRequestBody.fromBytes(CONTENT), chunkSize); + } + + @Test + void contentLengthNotPresent_shouldHandle() throws Exception { + CompletableFuture future = new CompletableFuture<>(); + TestAsyncRequestBody asyncRequestBody = new TestAsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + }; + SplittingPublisher splittingPublisher = SplittingPublisher.builder() + .asyncRequestBody(asyncRequestBody) + .chunkSizeInBytes((long) CHUNK_SIZE) + 
.bufferSizeInBytes(10L) + .build(); + + + List> futures = new ArrayList<>(); + AtomicInteger index = new AtomicInteger(0); + + splittingPublisher.subscribe(requestBody -> { + CompletableFuture baosFuture = new CompletableFuture<>(); + BaosSubscriber subscriber = new BaosSubscriber(baosFuture); + futures.add(baosFuture); + requestBody.subscribe(subscriber); + if (index.incrementAndGet() == NUM_OF_CHUNK) { + assertThat(requestBody.contentLength()).hasValue(1L); + } else { + assertThat(requestBody.contentLength()).hasValue((long) CHUNK_SIZE); + } + }).get(5, TimeUnit.SECONDS); + assertThat(futures.size()).isEqualTo(NUM_OF_CHUNK); + + for (int i = 0; i < futures.size(); i++) { + try (ByteArrayInputStream inputStream = new ByteArrayInputStream(CONTENT)) { + byte[] expected; + if (i == futures.size() - 1) { + expected = new byte[1]; + } else { + expected = new byte[CHUNK_SIZE]; + } + inputStream.skip(i * CHUNK_SIZE); + inputStream.read(expected); + byte[] actualBytes = futures.get(i).join(); + assertThat(actualBytes).isEqualTo(expected); + }; + } + + } + + + private static void verifySplitContent(AsyncRequestBody asyncRequestBody, int chunkSize) throws Exception { + SplittingPublisher splittingPublisher = SplittingPublisher.builder() + .asyncRequestBody(asyncRequestBody) + .chunkSizeInBytes((long) chunkSize) + .bufferSizeInBytes((long) chunkSize * 4) + .build(); + + List> futures = new ArrayList<>(); + + splittingPublisher.subscribe(requestBody -> { + CompletableFuture baosFuture = new CompletableFuture<>(); + BaosSubscriber subscriber = new BaosSubscriber(baosFuture); + futures.add(baosFuture); + requestBody.subscribe(subscriber); + }).get(5, TimeUnit.SECONDS); + + assertThat(futures.size()).isEqualTo((int) Math.ceil(CONTENT_SIZE / (double) chunkSize)); + + for (int i = 0; i < futures.size(); i++) { + try (FileInputStream fileInputStream = new FileInputStream(testFile)) { + byte[] expected; + if (i == futures.size() - 1) { + int lastChunk = CONTENT_SIZE % chunkSize == 0 
? chunkSize : (CONTENT_SIZE % chunkSize); + expected = new byte[lastChunk]; + } else { + expected = new byte[chunkSize]; + } + fileInputStream.skip(i * chunkSize); + fileInputStream.read(expected); + byte[] actualBytes = futures.get(i).join(); + assertThat(actualBytes).isEqualTo(expected); + }; + } + } + + private static class TestAsyncRequestBody implements AsyncRequestBody { + private volatile boolean cancelled; + private volatile boolean isDone; + + @Override + public Optional contentLength() { + return Optional.of((long) CONTENT.length); + } + + @Override + public void subscribe(Subscriber s) { + s.onSubscribe(new Subscription() { + @Override + public void request(long n) { + if (isDone) { + return; + } + isDone = true; + s.onNext(ByteBuffer.wrap(CONTENT)); + s.onComplete(); + + } + + @Override + public void cancel() { + cancelled = true; + } + }); + + } + } + + private static final class OnlyRequestOnceSubscriber implements Subscriber { + private List asyncRequestBodies = new ArrayList<>(); + + @Override + public void onSubscribe(Subscription s) { + s.request(1); + } + + @Override + public void onNext(AsyncRequestBody requestBody) { + asyncRequestBodies.add(requestBody); + } + + @Override + public void onError(Throwable t) { + + } + + @Override + public void onComplete() { + + } + } + + private static final class BaosSubscriber implements Subscriber { + private final CompletableFuture resultFuture; + + private ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + private Subscription subscription; + + BaosSubscriber(CompletableFuture resultFuture) { + this.resultFuture = resultFuture; + } + + @Override + public void onSubscribe(Subscription s) { + if (this.subscription != null) { + s.cancel(); + return; + } + this.subscription = s; + subscription.request(Long.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + invokeSafely(() -> baos.write(BinaryUtils.copyBytesFrom(byteBuffer))); + subscription.request(1); + } + + @Override + 
public void onError(Throwable throwable) { + baos = null; + resultFuture.completeExceptionally(throwable); + } + + @Override + public void onComplete() { + resultFuture.complete(baos.toByteArray()); + } + } +} diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index fb9fd484b5f..57efd1b26ea 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 1cc93f2fc23..4035dce765c 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT apache-client diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index 5def14642ce..6cfc5639362 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientWireMockTest.java b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientWireMockTest.java index e2dcbf32e27..e43e8d4b072 100644 --- a/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientWireMockTest.java +++ b/http-clients/aws-crt-client/src/test/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientWireMockTest.java @@ -29,9 +29,6 @@ import com.github.tomakehurst.wiremock.junit.WireMockRule; import java.net.URI; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -45,14 +42,10 @@ import 
software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.http.async.AsyncExecuteRequest; import software.amazon.awssdk.http.async.SdkAsyncHttpClient; -import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.metrics.MetricCollection; import software.amazon.awssdk.utils.AttributeMap; -import software.amazon.awssdk.utils.Logger; public class AwsCrtHttpClientWireMockTest { - private static final Logger log = Logger.loggerFor(AwsCrtHttpClientWireMockTest.class); - @Rule public WireMockRule mockServer = new WireMockRule(wireMockConfig() .dynamicPort()); diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index ae5ec6d9c03..bf36a80542d 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/http-clients/pom.xml b/http-clients/pom.xml index 17c128b4f64..0bb3c35ddaa 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index abeaee121d0..eef4dcc4925 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index c3c28390cd4..6618f5733e7 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml 
index 81035ca9977..03561273bd0 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index 8896c05dfb1..ae803de3621 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -91,7 +91,7 @@ ${project.version} - 2.20.108 + 2.20.125 2.13.2 2.13.4.2 2.13.2 @@ -115,7 +115,7 @@ 2.2.21 1.15 1.29 - 0.22.2 + 0.24.0 5.8.1 diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index c5cccfd6120..12d2868b89e 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../pom.xml release-scripts diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index 9d5f7bf7758..d92f5545390 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/document/JsonStringFormatHelper.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/document/JsonStringFormatHelper.java index 4fa04cb1b8f..8dbabc4830b 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/document/JsonStringFormatHelper.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/document/JsonStringFormatHelper.java @@ -74,9 +74,6 @@ public static String addEscapeCharacters(String 
input) { case '\"': output.append("\\\""); // double-quote character break; - case '\'': - output.append("\\'"); // single-quote character - break; default: output.append(ch); break; diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/document/EnhancedDocumentTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/document/EnhancedDocumentTest.java index 5edd69ab7be..71717ce6fec 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/document/EnhancedDocumentTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/document/EnhancedDocumentTest.java @@ -23,6 +23,8 @@ import static org.assertj.core.api.Assertions.assertThat; import static software.amazon.awssdk.enhanced.dynamodb.document.EnhancedDocumentTestData.testDataInstance; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; import java.math.BigDecimal; import java.time.LocalDate; import java.util.ArrayList; @@ -55,6 +57,8 @@ private static Stream escapeDocumentStrings() { return Stream.of( Arguments.of(String.valueOf(c), "{\"key\":\"\\n\"}") , Arguments.of("", "{\"key\":\"\"}") + , Arguments.of("\"", "{\"key\":\"\\\"\"}") + , Arguments.of("\\", "{\"key\":\"\\\\\"}") , Arguments.of(" ", "{\"key\":\" \"}") , Arguments.of("\t", "{\"key\":\"\\t\"}") , Arguments.of("\n", "{\"key\":\"\\n\"}") @@ -63,6 +67,13 @@ private static Stream escapeDocumentStrings() { ); } + private static Stream unEscapeDocumentStrings() { + return Stream.of( + Arguments.of("'", "{\"key\":\"'\"}"), + Arguments.of("'single quote'", "{\"key\":\"'single quote'\"}") + ); + } + @Test void enhancedDocumentGetters() { @@ -337,13 +348,27 @@ void invalidKeyNames(String escapingString) { @ParameterizedTest @MethodSource("escapeDocumentStrings") - void escapingTheValues(String escapingString, String 
expectedJson) { + void escapingTheValues(String escapingString, String expectedJson) throws JsonProcessingException { + + EnhancedDocument document = EnhancedDocument.builder() + .attributeConverterProviders(defaultProvider()) + .putString("key", escapingString) + .build(); + assertThat(document.toJson()).isEqualTo(expectedJson); + assertThat(new ObjectMapper().readTree(document.toJson())).isNotNull(); + } + + @ParameterizedTest + @MethodSource("unEscapeDocumentStrings") + void unEscapingTheValues(String escapingString, String expectedJson) throws JsonProcessingException { EnhancedDocument document = EnhancedDocument.builder() .attributeConverterProviders(defaultProvider()) .putString("key", escapingString) .build(); assertThat(document.toJson()).isEqualTo(expectedJson); + assertThat(new ObjectMapper().readTree(document.toJson())).isNotNull(); + } @Test diff --git a/services-custom/iam-policy-builder/pom.xml b/services-custom/iam-policy-builder/pom.xml index 70a1485ea10..12012995d49 100644 --- a/services-custom/iam-policy-builder/pom.xml +++ b/services-custom/iam-policy-builder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml iam-policy-builder diff --git a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java index fd3b4d663b9..0fd0354a395 100644 --- a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java +++ b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java @@ -55,7 +55,7 @@ * Create a new IAM identity policy that allows a role to write items to an Amazon DynamoDB table. 
* {@snippet : * // IamClient requires a dependency on software.amazon.awssdk:iam - * try (IamClient iam = IamClient.create()) { + * try (IamClient iam = IamClient.builder().region(Region.AWS_GLOBAL).build()) { * IamPolicy policy = * IamPolicy.builder() * .addStatement(IamStatement.builder() @@ -73,7 +73,7 @@ * Download the policy uploaded in the previous example and create a new policy with "read" access added to it. * {@snippet : * // IamClient requires a dependency on software.amazon.awssdk:iam - * try (IamClient iam = IamClient.create()) { + * try (IamClient iam = IamClient.builder().region(Region.AWS_GLOBAL).build()) { * String policyArn = "arn:aws:iam::123456789012:policy/AllowWriteBookMetadata"; * GetPolicyResponse getPolicyResponse = iam.getPolicy(r -> r.policyArn(policyArn)); * diff --git a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicyReader.java b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicyReader.java index 92e6505548a..6c87957e52f 100644 --- a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicyReader.java +++ b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicyReader.java @@ -27,7 +27,7 @@ * Log the number of statements in a policy downloaded from IAM. 
* {@snippet : * // IamClient requires a dependency on software.amazon.awssdk:iam - * try (IamClient iam = IamClient.create()) { + * try (IamClient iam = IamClient.builder().region(Region.AWS_GLOBAL).build()) { * String policyArn = "arn:aws:iam::123456789012:policy/AllowWriteBookMetadata"; * GetPolicyResponse getPolicyResponse = iam.getPolicy(r -> r.policyArn(policyArn)); * diff --git a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicyWriter.java b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicyWriter.java index 2b64ccfb2ba..faaffa73feb 100644 --- a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicyWriter.java +++ b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicyWriter.java @@ -28,7 +28,7 @@ * Create a new IAM identity policy that allows a role to write items to an Amazon DynamoDB table. * {@snippet : * // IamClient requires a dependency on software.amazon.awssdk:iam - * try (IamClient iam = IamClient.create()) { + * try (IamClient iam = IamClient.builder().region(Region.AWS_GLOBAL).build()) { * IamPolicy policy = * IamPolicy.builder() * .addStatement(IamStatement.builder() diff --git a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/internal/DefaultIamPolicyWriter.java b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/internal/DefaultIamPolicyWriter.java index 553df49d3e1..b63021af221 100644 --- a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/internal/DefaultIamPolicyWriter.java +++ b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/internal/DefaultIamPolicyWriter.java @@ -143,7 +143,7 @@ private void writeStatement(JsonWriter writer, IamStatement statement) { writePrincipals(writer, "NotPrincipal", 
statement.notPrincipals()); writeValueArrayField(writer, "Action", statement.actions()); writeValueArrayField(writer, "NotAction", statement.notActions()); - writeValueArrayField(writer, "Resource", statement.actions()); + writeValueArrayField(writer, "Resource", statement.resources()); writeValueArrayField(writer, "NotResource", statement.notResources()); writeConditions(writer, statement.conditions()); writer.writeEndObject(); diff --git a/services-custom/iam-policy-builder/src/test/java/software/amazon/awssdk/policybuilder/iam/IamPolicyReaderTest.java b/services-custom/iam-policy-builder/src/test/java/software/amazon/awssdk/policybuilder/iam/IamPolicyReaderTest.java index 4b1f5d341b0..89112452d64 100644 --- a/services-custom/iam-policy-builder/src/test/java/software/amazon/awssdk/policybuilder/iam/IamPolicyReaderTest.java +++ b/services-custom/iam-policy-builder/src/test/java/software/amazon/awssdk/policybuilder/iam/IamPolicyReaderTest.java @@ -23,12 +23,18 @@ import org.junit.jupiter.api.Test; class IamPolicyReaderTest { - private static final IamPrincipal PRINCIPAL_1 = IamPrincipal.ALL; - private static final IamPrincipal PRINCIPAL_2 = IamPrincipal.create("2", "*"); - private static final IamResource RESOURCE_1 = IamResource.create("1"); - private static final IamResource RESOURCE_2 = IamResource.create("2"); - private static final IamAction ACTION_1 = IamAction.create("1"); - private static final IamAction ACTION_2 = IamAction.create("2"); + private static final IamPrincipal PRINCIPAL_1 = IamPrincipal.create("P1", "*"); + private static final IamPrincipal PRINCIPAL_2 = IamPrincipal.create("P2", "*"); + private static final IamPrincipal NOT_PRINCIPAL_1 = IamPrincipal.create("NP1", "*"); + private static final IamPrincipal NOT_PRINCIPAL_2 = IamPrincipal.create("NP2", "*"); + private static final IamResource RESOURCE_1 = IamResource.create("R1"); + private static final IamResource RESOURCE_2 = IamResource.create("R2"); + private static final IamResource 
NOT_RESOURCE_1 = IamResource.create("NR1"); + private static final IamResource NOT_RESOURCE_2 = IamResource.create("NR2"); + private static final IamAction ACTION_1 = IamAction.create("A1"); + private static final IamAction ACTION_2 = IamAction.create("A2"); + private static final IamAction NOT_ACTION_1 = IamAction.create("NA1"); + private static final IamAction NOT_ACTION_2 = IamAction.create("NA2"); private static final IamCondition CONDITION_1 = IamCondition.create("1", "K1", "V1"); private static final IamCondition CONDITION_2 = IamCondition.create("1", "K2", "V1"); private static final IamCondition CONDITION_3 = IamCondition.create("1", "K2", "V2"); @@ -39,11 +45,11 @@ class IamPolicyReaderTest { .effect(ALLOW) .sid("Sid") .principals(asList(PRINCIPAL_1, PRINCIPAL_2)) - .notPrincipals(asList(PRINCIPAL_1, PRINCIPAL_2)) + .notPrincipals(asList(NOT_PRINCIPAL_1, NOT_PRINCIPAL_2)) .resources(asList(RESOURCE_1, RESOURCE_2)) - .notResources(asList(RESOURCE_1, RESOURCE_2)) + .notResources(asList(NOT_RESOURCE_1, NOT_RESOURCE_2)) .actions(asList(ACTION_1, ACTION_2)) - .notActions(asList(ACTION_1, ACTION_2)) + .notActions(asList(NOT_ACTION_1, NOT_ACTION_2)) .conditions(asList(CONDITION_1, CONDITION_2, CONDITION_3, CONDITION_4)) .build(); @@ -66,12 +72,12 @@ class IamPolicyReaderTest { IamStatement.builder() .effect(ALLOW) .sid("Sid") - .principals(singletonList(PRINCIPAL_1)) - .notPrincipals(singletonList(PRINCIPAL_1)) + .principals(singletonList(IamPrincipal.ALL)) + .notPrincipals(singletonList(IamPrincipal.ALL)) .resources(singletonList(RESOURCE_1)) - .notResources(singletonList(RESOURCE_1)) + .notResources(singletonList(NOT_RESOURCE_1)) .actions(singletonList(ACTION_1)) - .notActions(singletonList(ACTION_1)) + .notActions(singletonList(NOT_ACTION_1)) .conditions(singletonList(CONDITION_1)) .build(); @@ -85,17 +91,6 @@ class IamPolicyReaderTest { @Test public void readFullPolicyWorks() { - assertThat(READER.read("{\"Version\":\"Version\"," - + "\"Id\":\"Id\"," - + 
"\"Statement\":[" - + "{\"Sid\":\"Sid\",\"Effect\":\"Allow\",\"Principal\":{\"*\":\"*\",\"2\":\"*\"},\"NotPrincipal\":{\"*\":\"*\",\"2\":\"*\"},\"Action\":[\"1\",\"2\"],\"NotAction\":[\"1\",\"2\"],\"Resource\":[\"1\",\"2\"],\"NotResource\":[\"1\",\"2\"],\"Condition\":{\"1\":{\"K1\":\"V1\",\"K2\":[\"V1\",\"V2\"]},\"2\":{\"K1\":\"V1\"}}}," - + "{\"Sid\":\"Sid\",\"Effect\":\"Allow\",\"Principal\":{\"*\":\"*\",\"2\":\"*\"},\"NotPrincipal\":{\"*\":\"*\",\"2\":\"*\"},\"Action\":[\"1\",\"2\"],\"NotAction\":[\"1\",\"2\"],\"Resource\":[\"1\",\"2\"],\"NotResource\":[\"1\",\"2\"],\"Condition\":{\"1\":{\"K1\":\"V1\",\"K2\":[\"V1\",\"V2\"]},\"2\":{\"K1\":\"V1\"}}}" - + "]}")) - .isEqualTo(FULL_POLICY); - } - - @Test - public void prettyWriteFullPolicyWorks() { assertThat(READER.read("{\n" + " \"Version\" : \"Version\",\n" + " \"Id\" : \"Id\",\n" @@ -103,17 +98,17 @@ public void prettyWriteFullPolicyWorks() { + " \"Sid\" : \"Sid\",\n" + " \"Effect\" : \"Allow\",\n" + " \"Principal\" : {\n" - + " \"*\" : \"*\",\n" - + " \"2\" : \"*\"\n" + + " \"P1\" : \"*\",\n" + + " \"P2\" : \"*\"\n" + " },\n" + " \"NotPrincipal\" : {\n" - + " \"*\" : \"*\",\n" - + " \"2\" : \"*\"\n" + + " \"NP1\" : \"*\",\n" + + " \"NP2\" : \"*\"\n" + " },\n" - + " \"Action\" : [ \"1\", \"2\" ],\n" - + " \"NotAction\" : [ \"1\", \"2\" ],\n" - + " \"Resource\" : [ \"1\", \"2\" ],\n" - + " \"NotResource\" : [ \"1\", \"2\" ],\n" + + " \"Action\" : [ \"A1\", \"A2\" ],\n" + + " \"NotAction\" : [ \"NA1\", \"NA2\" ],\n" + + " \"Resource\" : [ \"R1\", \"R2\" ],\n" + + " \"NotResource\" : [ \"NR1\", \"NR2\" ],\n" + " \"Condition\" : {\n" + " \"1\" : {\n" + " \"K1\" : \"V1\",\n" @@ -127,17 +122,17 @@ public void prettyWriteFullPolicyWorks() { + " \"Sid\" : \"Sid\",\n" + " \"Effect\" : \"Allow\",\n" + " \"Principal\" : {\n" - + " \"*\" : \"*\",\n" - + " \"2\" : \"*\"\n" + + " \"P1\" : \"*\",\n" + + " \"P2\" : \"*\"\n" + " },\n" + " \"NotPrincipal\" : {\n" - + " \"*\" : \"*\",\n" - + " \"2\" : \"*\"\n" + + " \"NP1\" : 
\"*\",\n" + + " \"NP2\" : \"*\"\n" + " },\n" - + " \"Action\" : [ \"1\", \"2\" ],\n" - + " \"NotAction\" : [ \"1\", \"2\" ],\n" - + " \"Resource\" : [ \"1\", \"2\" ],\n" - + " \"NotResource\" : [ \"1\", \"2\" ],\n" + + " \"Action\" : [ \"A1\", \"A2\" ],\n" + + " \"NotAction\" : [ \"NA1\", \"NA2\" ],\n" + + " \"Resource\" : [ \"R1\", \"R2\" ],\n" + + " \"NotResource\" : [ \"NR1\", \"NR2\" ],\n" + " \"Condition\" : {\n" + " \"1\" : {\n" + " \"K1\" : \"V1\",\n" @@ -153,7 +148,7 @@ public void prettyWriteFullPolicyWorks() { } @Test - public void writeMinimalPolicyWorks() { + public void readMinimalPolicyWorks() { assertThat(READER.read("{\n" + " \"Version\" : \"Version\",\n" + " \"Statement\" : {\n" @@ -164,7 +159,7 @@ public void writeMinimalPolicyWorks() { } @Test - public void singleElementListsAreWrittenAsNonArrays() { + public void singleElementListsAreSupported() { assertThat(READER.read("{\n" + " \"Version\" : \"Version\",\n" + " \"Statement\" : {\n" @@ -172,10 +167,10 @@ public void singleElementListsAreWrittenAsNonArrays() { + " \"Effect\" : \"Allow\",\n" + " \"Principal\" : \"*\",\n" + " \"NotPrincipal\" : \"*\",\n" - + " \"Action\" : \"1\",\n" - + " \"NotAction\" : \"1\",\n" - + " \"Resource\" : \"1\",\n" - + " \"NotResource\" : \"1\",\n" + + " \"Action\" : \"A1\",\n" + + " \"NotAction\" : \"NA1\",\n" + + " \"Resource\" : \"R1\",\n" + + " \"NotResource\" : \"NR1\",\n" + " \"Condition\" : {\n" + " \"1\" : {\n" + " \"K1\" : \"V1\"\n" diff --git a/services-custom/iam-policy-builder/src/test/java/software/amazon/awssdk/policybuilder/iam/IamPolicyWriterTest.java b/services-custom/iam-policy-builder/src/test/java/software/amazon/awssdk/policybuilder/iam/IamPolicyWriterTest.java index 8e2c6811b66..9b0a932fa62 100644 --- a/services-custom/iam-policy-builder/src/test/java/software/amazon/awssdk/policybuilder/iam/IamPolicyWriterTest.java +++ b/services-custom/iam-policy-builder/src/test/java/software/amazon/awssdk/policybuilder/iam/IamPolicyWriterTest.java @@ -23,12 
+23,18 @@ import org.junit.jupiter.api.Test; class IamPolicyWriterTest { - private static final IamPrincipal PRINCIPAL_1 = IamPrincipal.create("1", "*"); - private static final IamPrincipal PRINCIPAL_2 = IamPrincipal.create("2", "*"); - private static final IamResource RESOURCE_1 = IamResource.create("1"); - private static final IamResource RESOURCE_2 = IamResource.create("2"); - private static final IamAction ACTION_1 = IamAction.create("1"); - private static final IamAction ACTION_2 = IamAction.create("2"); + private static final IamPrincipal PRINCIPAL_1 = IamPrincipal.create("P1", "*"); + private static final IamPrincipal PRINCIPAL_2 = IamPrincipal.create("P2", "*"); + private static final IamPrincipal NOT_PRINCIPAL_1 = IamPrincipal.create("NP1", "*"); + private static final IamPrincipal NOT_PRINCIPAL_2 = IamPrincipal.create("NP2", "*"); + private static final IamResource RESOURCE_1 = IamResource.create("R1"); + private static final IamResource RESOURCE_2 = IamResource.create("R2"); + private static final IamResource NOT_RESOURCE_1 = IamResource.create("NR1"); + private static final IamResource NOT_RESOURCE_2 = IamResource.create("NR2"); + private static final IamAction ACTION_1 = IamAction.create("A1"); + private static final IamAction ACTION_2 = IamAction.create("A2"); + private static final IamAction NOT_ACTION_1 = IamAction.create("NA1"); + private static final IamAction NOT_ACTION_2 = IamAction.create("NA2"); private static final IamCondition CONDITION_1 = IamCondition.create("1", "K1", "V1"); private static final IamCondition CONDITION_2 = IamCondition.create("2", "K1", "V1"); private static final IamCondition CONDITION_3 = IamCondition.create("1", "K2", "V1"); @@ -39,11 +45,11 @@ class IamPolicyWriterTest { .effect(ALLOW) .sid("Sid") .principals(asList(PRINCIPAL_1, PRINCIPAL_2)) - .notPrincipals(asList(PRINCIPAL_1, PRINCIPAL_2)) + .notPrincipals(asList(NOT_PRINCIPAL_1, NOT_PRINCIPAL_2)) .resources(asList(RESOURCE_1, RESOURCE_2)) - 
.notResources(asList(RESOURCE_1, RESOURCE_2)) + .notResources(asList(NOT_RESOURCE_1, NOT_RESOURCE_2)) .actions(asList(ACTION_1, ACTION_2)) - .notActions(asList(ACTION_1, ACTION_2)) + .notActions(asList(NOT_ACTION_1, NOT_ACTION_2)) .conditions(asList(CONDITION_1, CONDITION_2, CONDITION_3, CONDITION_4)) .build(); @@ -69,9 +75,9 @@ class IamPolicyWriterTest { .principals(singletonList(IamPrincipal.ALL)) .notPrincipals(singletonList(IamPrincipal.ALL)) .resources(singletonList(RESOURCE_1)) - .notResources(singletonList(RESOURCE_1)) + .notResources(singletonList(NOT_RESOURCE_1)) .actions(singletonList(ACTION_1)) - .notActions(singletonList(ACTION_1)) + .notActions(singletonList(NOT_ACTION_1)) .conditions(singletonList(CONDITION_1)) .build(); @@ -95,8 +101,8 @@ public void writeFullPolicyWorks() { .isEqualTo("{\"Version\":\"Version\"," + "\"Id\":\"Id\"," + "\"Statement\":[" - + "{\"Sid\":\"Sid\",\"Effect\":\"Allow\",\"Principal\":{\"1\":\"*\",\"2\":\"*\"},\"NotPrincipal\":{\"1\":\"*\",\"2\":\"*\"},\"Action\":[\"1\",\"2\"],\"NotAction\":[\"1\",\"2\"],\"Resource\":[\"1\",\"2\"],\"NotResource\":[\"1\",\"2\"],\"Condition\":{\"1\":{\"K1\":\"V1\",\"K2\":[\"V1\",\"V2\"]},\"2\":{\"K1\":\"V1\"}}}," - + "{\"Sid\":\"Sid\",\"Effect\":\"Allow\",\"Principal\":{\"1\":\"*\",\"2\":\"*\"},\"NotPrincipal\":{\"1\":\"*\",\"2\":\"*\"},\"Action\":[\"1\",\"2\"],\"NotAction\":[\"1\",\"2\"],\"Resource\":[\"1\",\"2\"],\"NotResource\":[\"1\",\"2\"],\"Condition\":{\"1\":{\"K1\":\"V1\",\"K2\":[\"V1\",\"V2\"]},\"2\":{\"K1\":\"V1\"}}}" + + "{\"Sid\":\"Sid\",\"Effect\":\"Allow\",\"Principal\":{\"P1\":\"*\",\"P2\":\"*\"},\"NotPrincipal\":{\"NP1\":\"*\",\"NP2\":\"*\"},\"Action\":[\"A1\",\"A2\"],\"NotAction\":[\"NA1\",\"NA2\"],\"Resource\":[\"R1\",\"R2\"],\"NotResource\":[\"NR1\",\"NR2\"],\"Condition\":{\"1\":{\"K1\":\"V1\",\"K2\":[\"V1\",\"V2\"]},\"2\":{\"K1\":\"V1\"}}}," + + 
"{\"Sid\":\"Sid\",\"Effect\":\"Allow\",\"Principal\":{\"P1\":\"*\",\"P2\":\"*\"},\"NotPrincipal\":{\"NP1\":\"*\",\"NP2\":\"*\"},\"Action\":[\"A1\",\"A2\"],\"NotAction\":[\"NA1\",\"NA2\"],\"Resource\":[\"R1\",\"R2\"],\"NotResource\":[\"NR1\",\"NR2\"],\"Condition\":{\"1\":{\"K1\":\"V1\",\"K2\":[\"V1\",\"V2\"]},\"2\":{\"K1\":\"V1\"}}}" + "]}"); } @@ -110,17 +116,17 @@ public void prettyWriteFullPolicyWorks() { + " \"Sid\" : \"Sid\",\n" + " \"Effect\" : \"Allow\",\n" + " \"Principal\" : {\n" - + " \"1\" : \"*\",\n" - + " \"2\" : \"*\"\n" + + " \"P1\" : \"*\",\n" + + " \"P2\" : \"*\"\n" + " },\n" + " \"NotPrincipal\" : {\n" - + " \"1\" : \"*\",\n" - + " \"2\" : \"*\"\n" + + " \"NP1\" : \"*\",\n" + + " \"NP2\" : \"*\"\n" + " },\n" - + " \"Action\" : [ \"1\", \"2\" ],\n" - + " \"NotAction\" : [ \"1\", \"2\" ],\n" - + " \"Resource\" : [ \"1\", \"2\" ],\n" - + " \"NotResource\" : [ \"1\", \"2\" ],\n" + + " \"Action\" : [ \"A1\", \"A2\" ],\n" + + " \"NotAction\" : [ \"NA1\", \"NA2\" ],\n" + + " \"Resource\" : [ \"R1\", \"R2\" ],\n" + + " \"NotResource\" : [ \"NR1\", \"NR2\" ],\n" + " \"Condition\" : {\n" + " \"1\" : {\n" + " \"K1\" : \"V1\",\n" @@ -134,17 +140,17 @@ public void prettyWriteFullPolicyWorks() { + " \"Sid\" : \"Sid\",\n" + " \"Effect\" : \"Allow\",\n" + " \"Principal\" : {\n" - + " \"1\" : \"*\",\n" - + " \"2\" : \"*\"\n" + + " \"P1\" : \"*\",\n" + + " \"P2\" : \"*\"\n" + " },\n" + " \"NotPrincipal\" : {\n" - + " \"1\" : \"*\",\n" - + " \"2\" : \"*\"\n" + + " \"NP1\" : \"*\",\n" + + " \"NP2\" : \"*\"\n" + " },\n" - + " \"Action\" : [ \"1\", \"2\" ],\n" - + " \"NotAction\" : [ \"1\", \"2\" ],\n" - + " \"Resource\" : [ \"1\", \"2\" ],\n" - + " \"NotResource\" : [ \"1\", \"2\" ],\n" + + " \"Action\" : [ \"A1\", \"A2\" ],\n" + + " \"NotAction\" : [ \"NA1\", \"NA2\" ],\n" + + " \"Resource\" : [ \"R1\", \"R2\" ],\n" + + " \"NotResource\" : [ \"NR1\", \"NR2\" ],\n" + " \"Condition\" : {\n" + " \"1\" : {\n" + " \"K1\" : \"V1\",\n" @@ -179,10 +185,10 @@ public void 
singleElementListsAreWrittenAsNonArrays() { + " \"Effect\" : \"Allow\",\n" + " \"Principal\" : \"*\",\n" + " \"NotPrincipal\" : \"*\",\n" - + " \"Action\" : \"1\",\n" - + " \"NotAction\" : \"1\",\n" - + " \"Resource\" : \"1\",\n" - + " \"NotResource\" : \"1\",\n" + + " \"Action\" : \"A1\",\n" + + " \"NotAction\" : \"NA1\",\n" + + " \"Resource\" : \"R1\",\n" + + " \"NotResource\" : \"NR1\",\n" + " \"Condition\" : {\n" + " \"1\" : {\n" + " \"K1\" : \"V1\"\n" diff --git a/services-custom/pom.xml b/services-custom/pom.xml index 5b76c2506dd..c8634fbe963 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services-custom/s3-transfer-manager/pom.xml b/services-custom/s3-transfer-manager/pom.xml index e0ceb12a796..002a3500039 100644 --- a/services-custom/s3-transfer-manager/pom.xml +++ b/services-custom/s3-transfer-manager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml s3-transfer-manager diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index d6a30f558aa..65bcc0c362b 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/account/pom.xml b/services/account/pom.xml index 25a183227a4..ba25a84b29f 100644 --- a/services/account/pom.xml +++ b/services/account/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT account AWS Java SDK :: Services :: Account diff --git a/services/acm/pom.xml b/services/acm/pom.xml index 29f77bb2d0f..a71a52d72f6 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT 
+ 2.20.126-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 1758163c0c7..3253e20cb6f 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/acmpca/src/main/resources/codegen-resources/service-2.json b/services/acmpca/src/main/resources/codegen-resources/service-2.json index 71e71b0a92b..62fbca99dbc 100644 --- a/services/acmpca/src/main/resources/codegen-resources/service-2.json +++ b/services/acmpca/src/main/resources/codegen-resources/service-2.json @@ -1406,7 +1406,7 @@ }, "IdempotencyToken":{ "shape":"IdempotencyToken", - "documentation":"

Alphanumeric string that can be used to distinguish between calls to the IssueCertificate action. Idempotency tokens for IssueCertificate time out after one minute. Therefore, if you call IssueCertificate multiple times with the same idempotency token within one minute, Amazon Web Services Private CA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, Amazon Web Services Private CA recognizes that you are requesting multiple certificates.

" + "documentation":"

Alphanumeric string that can be used to distinguish between calls to the IssueCertificate action. Idempotency tokens for IssueCertificate time out after five minutes. Therefore, if you call IssueCertificate multiple times with the same idempotency token within five minutes, Amazon Web Services Private CA recognizes that you are requesting only one certificate and will issue only one. If you change the idempotency token for each call, Amazon Web Services Private CA recognizes that you are requesting multiple certificates.

" } } }, @@ -2080,5 +2080,5 @@ ] } }, - "documentation":"

This is the Amazon Web Services Private Certificate Authority API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types involved in creating and managing a private certificate authority (CA) for your organization.

The documentation for each action shows the API request parameters and the JSON response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you prefer. For more information, see Amazon Web Services SDKs.

Each Amazon Web Services Private CA API operation has a quota that determines the number of times the operation can be called per second. Amazon Web Services Private CA throttles API requests at different rates depending on the operation. Throttling means that Amazon Web Services Private CA rejects an otherwise valid request because the request exceeds the operation's quota for the number of requests per second. When a request is throttled, Amazon Web Services Private CA returns a ThrottlingException error. Amazon Web Services Private CA does not guarantee a minimum request rate for APIs.

To see an up-to-date list of your Amazon Web Services Private CA quotas, or to request a quota increase, log into your Amazon Web Services account and visit the Service Quotas console.

" + "documentation":"

This is the Amazon Web Services Private Certificate Authority API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types involved in creating and managing a private certificate authority (CA) for your organization.

The documentation for each action shows the API request parameters and the JSON response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you prefer. For more information, see Amazon Web Services SDKs.

Each Amazon Web Services Private CA API operation has a quota that determines the number of times the operation can be called per second. Amazon Web Services Private CA throttles API requests at different rates depending on the operation. Throttling means that Amazon Web Services Private CA rejects an otherwise valid request because the request exceeds the operation's quota for the number of requests per second. When a request is throttled, Amazon Web Services Private CA returns a ThrottlingException error. Amazon Web Services Private CA does not guarantee a minimum request rate for APIs.

To see an up-to-date list of your Amazon Web Services Private CA quotas, or to request a quota increase, log into your Amazon Web Services account and visit the Service Quotas console.

" } diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index 6b3298c867f..dcae2f09462 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 alexaforbusiness diff --git a/services/amp/pom.xml b/services/amp/pom.xml index 07213090a7c..0a90c7e6e5d 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT amp AWS Java SDK :: Services :: Amp diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 32918df996b..0dbc6c19bfb 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index db973b2e27b..af367f1aa95 100644 --- a/services/amplifybackend/pom.xml +++ b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT amplifybackend AWS Java SDK :: Services :: Amplify Backend diff --git a/services/amplifybackend/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/amplifybackend/src/main/resources/codegen-resources/endpoint-rule-set.json index 46ca9fee12a..51ede4921f3 100644 --- a/services/amplifybackend/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/amplifybackend/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" 
+ ] } ], "type": "tree", @@ -46,64 +45,17 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "ref": "UseFIPS" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + true ] } - ] + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" }, { "conditions": [ @@ -111,19 +63,51 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "booleanEquals", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -133,90 +117,109 @@ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - 
"supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://amplifybackend-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://amplifybackend-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -229,78 +232,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - 
] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://amplifybackend.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://amplifybackend.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://amplifybackend.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://amplifybackend.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/amplifybackend/src/main/resources/codegen-resources/endpoint-tests.json b/services/amplifybackend/src/main/resources/codegen-resources/endpoint-tests.json index 0bc8d1e6539..480b035e9f9 100644 --- a/services/amplifybackend/src/main/resources/codegen-resources/endpoint-tests.json +++ 
b/services/amplifybackend/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,29 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.ap-south-1.amazonaws.com" + "url": "https://amplifybackend.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.ap-south-1.api.aws" + "url": "https://amplifybackend.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -47,48 +34,35 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": 
{ "endpoint": { - "url": "https://amplifybackend-fips.ca-central-1.amazonaws.com" + "url": "https://amplifybackend.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.ca-central-1.api.aws" + "url": "https://amplifybackend.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -99,48 +73,9 @@ } }, "params": { - "UseDualStack": false, "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -151,152 +86,9 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://amplifybackend.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -307,100 +99,22 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region 
eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.eu-west-2.api.aws" + "url": "https://amplifybackend.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -411,490 +125,365 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": 
"For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.ap-northeast-2.api.aws" + "url": "https://amplifybackend.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.ap-northeast-2.amazonaws.com" + "url": "https://amplifybackend.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.ap-northeast-2.api.aws" + "url": "https://amplifybackend.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false + "Region": "sa-east-1", + "UseFIPS": false, + 
"UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.ap-northeast-2.amazonaws.com" + "url": "https://amplifybackend.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.ap-northeast-1.api.aws" + "url": "https://amplifybackend.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.ap-northeast-1.amazonaws.com" + "url": "https://amplifybackend.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.ap-northeast-1.api.aws" + "url": "https://amplifybackend.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For 
region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://amplifybackend.ap-northeast-1.amazonaws.com" + "url": "https://amplifybackend-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.me-south-1.api.aws" + "url": "https://amplifybackend-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.me-south-1.amazonaws.com" + "url": "https://amplifybackend.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://amplifybackend.me-south-1.api.aws" + "url": "https://amplifybackend-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://amplifybackend.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.sa-east-1.api.aws" + "url": "https://amplifybackend-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.sa-east-1.amazonaws.com" + "url": "https://amplifybackend.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.sa-east-1.api.aws" + "url": "https://amplifybackend.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://amplifybackend.sa-east-1.amazonaws.com" + "url": "https://amplifybackend-fips.us-gov-east-1.api.aws" } }, "params": { - 
"UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.ap-southeast-1.api.aws" + "url": "https://amplifybackend-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.ap-southeast-1.amazonaws.com" + "url": "https://amplifybackend.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.ap-southeast-1.api.aws" + "url": "https://amplifybackend.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://amplifybackend.ap-southeast-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" 
}, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.ap-southeast-2.api.aws" + "url": "https://amplifybackend-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.ap-southeast-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.ap-southeast-2.api.aws" + "url": "https://amplifybackend.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://amplifybackend.ap-southeast-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but 
this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend-fips.us-east-1.api.aws" + "url": "https://amplifybackend-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.us-east-1.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.us-east-1.api.aws" + "url": "https://amplifybackend.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://amplifybackend.us-east-1.amazonaws.com" + "url": "https://example.com" } }, 
"params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://amplifybackend.us-east-2.amazonaws.com" - } - }, - "params": { + "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -904,9 +493,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -916,11 +505,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - 
"UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/amplifybackend/src/main/resources/codegen-resources/service-2.json b/services/amplifybackend/src/main/resources/codegen-resources/service-2.json index c2173184042..56c3286a04e 100644 --- a/services/amplifybackend/src/main/resources/codegen-resources/service-2.json +++ b/services/amplifybackend/src/main/resources/codegen-resources/service-2.json @@ -1298,6 +1298,7 @@ }, "BackendAuthAppleProviderConfig": { "type": "structure", + "sensitive": true, "members": { "ClientId": { "shape": "__string", @@ -1364,6 +1365,7 @@ }, "BackendAuthSocialProviderConfig": { "type": "structure", + "sensitive": true, "members": { "ClientId": { "shape": "__string", @@ -2823,6 +2825,7 @@ }, "EmailSettings": { "type": "structure", + "sensitive": true, "members": { "EmailMessage": { "shape": "__string", @@ -4244,6 +4247,7 @@ }, "SmsSettings": { "type": "structure", + "sensitive": true, "members": { "SmsMessage": { "shape": "__string", diff --git a/services/amplifyuibuilder/pom.xml b/services/amplifyuibuilder/pom.xml index d82435b424c..46c9847752d 100644 --- a/services/amplifyuibuilder/pom.xml +++ b/services/amplifyuibuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT amplifyuibuilder AWS Java SDK :: Services :: Amplify UI Builder diff --git a/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-rule-set.json index 25d1543825e..24b1f786c62 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/amplifyuibuilder/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": 
"error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" 
- }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://amplifyuibuilder-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://amplifyuibuilder-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://amplifyuibuilder-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://amplifyuibuilder-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" 
} ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://amplifyuibuilder.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://amplifyuibuilder.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://amplifyuibuilder.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://amplifyuibuilder.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git 
a/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json b/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json index a0b39beb017..6f659c6178d 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json +++ b/services/amplifyuibuilder/src/main/resources/codegen-resources/service-2.json @@ -45,7 +45,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Creates a new form for an Amplify.

", + "documentation":"

Creates a new form for an Amplify app.

", "idempotent":true }, "CreateTheme":{ @@ -356,7 +356,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Starts a code generation job for for a specified Amplify app and backend environment.

" + "documentation":"

Starts a code generation job for a specified Amplify app and backend environment.

" }, "UpdateComponent":{ "name":"UpdateComponent", @@ -453,6 +453,25 @@ }, "documentation":"

Represents the event action configuration for an element of a Component or ComponentChild. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components. ActionParameters defines the action that is performed when an event occurs on the component.

" }, + "ApiConfiguration":{ + "type":"structure", + "members":{ + "graphQLConfig":{ + "shape":"GraphQLRenderConfig", + "documentation":"

The configuration for an application using GraphQL APIs.

" + }, + "dataStoreConfig":{ + "shape":"DataStoreRenderConfig", + "documentation":"

The configuration for an application using DataStore APIs.

" + }, + "noApiConfig":{ + "shape":"NoApiRenderConfig", + "documentation":"

The configuration for an application with no API being used.

" + } + }, + "documentation":"

Describes the API configuration for a code generation job.

", + "union":true + }, "AppId":{ "type":"string", "max":20, @@ -763,7 +782,7 @@ "documentation":"

The name of the ReactStartCodegenJobData object.

" } }, - "documentation":"

Describes the configuration information for rendering the UI component associated the code generation job.

", + "documentation":"

Describes the configuration information for rendering the UI component associated with the code generation job.

", "union":true }, "CodegenJobStatus":{ @@ -1533,6 +1552,12 @@ }, "payload":"entity" }, + "DataStoreRenderConfig":{ + "type":"structure", + "members":{ + }, + "documentation":"

Describes the DataStore configuration for an API for a code generation job.

" + }, "DeleteComponentRequest":{ "type":"structure", "required":[ @@ -2509,6 +2534,39 @@ }, "payload":"theme" }, + "GraphQLRenderConfig":{ + "type":"structure", + "required":[ + "typesFilePath", + "queriesFilePath", + "mutationsFilePath", + "subscriptionsFilePath", + "fragmentsFilePath" + ], + "members":{ + "typesFilePath":{ + "shape":"String", + "documentation":"

The path to the GraphQL types file, relative to the component output directory.

" + }, + "queriesFilePath":{ + "shape":"String", + "documentation":"

The path to the GraphQL queries file, relative to the component output directory.

" + }, + "mutationsFilePath":{ + "shape":"String", + "documentation":"

The path to the GraphQL mutations file, relative to the component output directory.

" + }, + "subscriptionsFilePath":{ + "shape":"String", + "documentation":"

The path to the GraphQL subscriptions file, relative to the component output directory.

" + }, + "fragmentsFilePath":{ + "shape":"String", + "documentation":"

The path to the GraphQL fragments file, relative to the component output directory.

" + } + }, + "documentation":"

Describes the GraphQL configuration for an API for a code generation job.

" + }, "IdentifierList":{ "type":"list", "member":{"shape":"String"} @@ -2801,6 +2859,12 @@ }, "documentation":"

Represents the state configuration when an action modifies a property of another element within the same component.

" }, + "NoApiRenderConfig":{ + "type":"structure", + "members":{ + }, + "documentation":"

Describes the configuration for an application with no API being used.

" + }, "NumValues":{ "type":"list", "member":{"shape":"Integer"} @@ -2910,6 +2974,10 @@ "inlineSourceMap":{ "shape":"Boolean", "documentation":"

Specifies whether the code generation job should render inline source maps.

" + }, + "apiConfiguration":{ + "shape":"ApiConfiguration", + "documentation":"

The API configuration for the code generation job.

" } }, "documentation":"

Describes the code generation job configuration for a React project.

" diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index 9e2e6a8ffb6..d40d9234c95 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index d151ea63a0f..608878a4083 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 0958f0edad7..68937fc6282 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 943cb9b58ac..620ab96f2a3 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + 
"conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + 
} + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apigateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apigateway-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://apigateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - 
"supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apigateway.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://apigateway-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://apigateway.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://apigateway.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://apigateway.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": 
"Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-tests.json b/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-tests.json index 6d936f9ae2e..3d5b86aed1f 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,146 +1,146 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-south-2.api.aws" + "url": "https://apigateway.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": true + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-south-2.amazonaws.com" + "url": "https://apigateway.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": true + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-south-2.api.aws" + "url": "https://apigateway.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + 
"documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-south-2.amazonaws.com" + "url": "https://apigateway.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-south-1.api.aws" + "url": "https://apigateway.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-south-1.amazonaws.com" + "url": "https://apigateway.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, "Region": "ap-south-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-south-1.api.aws" + "url": "https://apigateway.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { 
- "url": "https://apigateway.ap-south-1.amazonaws.com" + "url": "https://apigateway.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-south-1.api.aws" + "url": "https://apigateway.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-south-1.amazonaws.com" + "url": "https://apigateway.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-south-1.api.aws" + "url": "https://apigateway.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -151,1574 +151,417 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-south-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-south-2.api.aws" + "url": "https://apigateway.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-2", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-south-2.amazonaws.com" + "url": "https://apigateway.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-south-2.api.aws" + "url": "https://apigateway.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-south-2", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-south-2.amazonaws.com" + "url": "https://apigateway.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": false + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-gov-east-1.api.aws" + "url": 
"https://apigateway.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-gov-east-1.amazonaws.com" + "url": "https://apigateway.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-gov-east-1.api.aws" + "url": "https://apigateway.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-gov-east-1.amazonaws.com" + "url": "https://apigateway.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.me-central-1.api.aws" + "url": "https://apigateway.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": 
"me-central-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.me-central-1.amazonaws.com" + "url": "https://apigateway-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.me-central-1.api.aws" + "url": "https://apigateway-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway.me-central-1.amazonaws.com" + "url": "https://apigateway.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ca-central-1.api.aws" + "url": "https://apigateway.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - 
"documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ca-central-1.amazonaws.com" + "url": "https://apigateway.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway.ca-central-1.api.aws" + "url": "https://apigateway-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ca-central-1.amazonaws.com" + "url": "https://apigateway-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-central-1.api.aws" + "url": "https://apigateway.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-central-1 with 
FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-central-1.amazonaws.com" + "url": "https://apigateway.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-central-1.api.aws" + "url": "https://apigateway.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-central-1.amazonaws.com" + "url": "https://apigateway-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-central-2.api.aws" + "url": "https://apigateway-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-2", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", + "documentation": "For 
region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.eu-central-2.amazonaws.com" + "url": "https://apigateway.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "eu-central-2", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.eu-central-2.api.aws" + "url": "https://apigateway.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "eu-central-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "eu-central-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-west-1.api.aws" + "url": "https://apigateway-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and 
DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-1.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway.us-west-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-west-1.amazonaws.com" + "url": "https://apigateway-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-west-2.amazonaws.com" + "url": "https://apigateway.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-west-2.api.aws" + "url": "https://example.com" } }, "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.af-south-1.amazonaws.com" - } - }, - 
"params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - 
}, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://apigateway.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - 
"endpoint": { - "url": "https://apigateway-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": 
{ - "endpoint": { - "url": "https://apigateway-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.sa-east-1.amazonaws.com" - } - }, - 
"params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": 
"cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": 
"ap-southeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": 
true, - "Region": "us-iso-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": 
false, - "Region": "ap-southeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-4.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-4", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-southeast-4.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-4", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-4.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-southeast-4", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-southeast-4.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-southeast-4", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-1", - 
"UseFIPS": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true - } - }, - { - 
"documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": true, - "Region": "us-isob-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-isob-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseDualStack": true, - "Region": "us-isob-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { + "UseFIPS": false, "UseDualStack": false, - "Region": "us-isob-east-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", 
"expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1728,9 +571,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1740,11 +583,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json index 73b4de0df2a..367f45df1af 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json @@ -585,7 +585,7 @@ "shape" : "TooManyRequestsException", "documentation" : "

The client is sending more than the allowed number of requests per unit of time.

" } ], - "documentation" : "

Deletes a route request parameter.

" + "documentation" : "

Deletes a route request parameter. Supported only for WebSocket APIs.

" }, "DeleteRouteResponse" : { "name" : "DeleteRouteResponse", @@ -3089,7 +3089,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API itegrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" + "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -5047,7 +5047,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API itegrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" + "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -6033,7 +6033,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API itegrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" + "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -7615,7 +7615,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to the backend. The key should follow the pattern <action>:<header|querystring|path>.<location> where action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" + "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to the backend. The key should follow the pattern <action>:<header|querystring|path>.<location> where action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -7718,7 +7718,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to the backend. The key should follow the pattern <action>:<header|querystring|path>.<location> where action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" + "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to the backend. The key should follow the pattern <action>:<header|querystring|path>.<location> where action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" }, "RequestTemplates" : { "shape" : "TemplateMap", @@ -7825,7 +7825,7 @@ "RequestParameters" : { "shape" : "IntegrationParameters", "locationName" : "requestParameters", - "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API itegrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" + "documentation" : "

For WebSocket APIs, a key-value map specifying request parameters that are passed from the method request to the backend. The key is an integration request parameter name and the associated value is a method request parameter value or static value that must be enclosed within single quotes and pre-encoded as required by the backend. The method request parameter value must match the pattern of method.request.{location}.{name}\n , where \n {location}\n is querystring, path, or header; and \n {name}\n must be a valid and unique method request parameter name.

For HTTP API integrations with a specified integrationSubtype, request parameters are a key-value map specifying parameters that are passed to AWS_PROXY integrations. You can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Working with AWS service integrations for HTTP APIs.

For HTTP API integrations, without a specified integrationSubtype request parameters are a key-value map specifying how to transform HTTP requests before sending them to backend integrations. The key should follow the pattern <action>:<header|querystring|path>.<location>. The action can be append, overwrite or remove. For values, you can provide static values, or map request data, stage variables, or context variables that are evaluated at runtime. To learn more, see Transforming API requests and responses.

" }, "RequestTemplates" : { "shape" : "TemplateMap", diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index cf727211dfc..05f5db83bc2 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appconfigdata/pom.xml b/services/appconfigdata/pom.xml index 371999a0061..81d7c63e533 100644 --- a/services/appconfigdata/pom.xml +++ b/services/appconfigdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT appconfigdata AWS Java SDK :: Services :: App Config Data diff --git a/services/appfabric/pom.xml b/services/appfabric/pom.xml index 30471310556..8238986e198 100644 --- a/services/appfabric/pom.xml +++ b/services/appfabric/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT appfabric AWS Java SDK :: Services :: App Fabric diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index b79c3d24395..e7969fad583 100644 --- a/services/appflow/pom.xml +++ b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index 2eb0a9ef3e0..feed4b1f497 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 7772dfeaf5c..902912c3295 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT applicationautoscaling 
AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationcostprofiler/pom.xml b/services/applicationcostprofiler/pom.xml index cd91c777451..c235229a287 100644 --- a/services/applicationcostprofiler/pom.xml +++ b/services/applicationcostprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT applicationcostprofiler AWS Java SDK :: Services :: Application Cost Profiler diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index 7641d51769d..0f37c85e99b 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 6de209338a4..95f7a902d9c 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/applicationinsights/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/applicationinsights/src/main/resources/codegen-resources/endpoint-rule-set.json index 75e13ac5dd1..5aa99ae02e8 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/applicationinsights/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": 
"parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, 
+ "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://applicationinsights-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://applicationinsights-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://applicationinsights-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - 
"rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://applicationinsights.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://applicationinsights-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://applicationinsights.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://applicationinsights.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but 
this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://applicationinsights.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/applicationinsights/src/main/resources/codegen-resources/endpoint-tests.json b/services/applicationinsights/src/main/resources/codegen-resources/endpoint-tests.json index 7f3fbce2a03..af6b8c47b29 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/applicationinsights/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,770 +1,29 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { 
- "endpoint": { - "url": "https://applicationinsights-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", 
- "expect": { - "endpoint": { - "url": "https://applicationinsights.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS 
disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack 
disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": 
{ - "url": "https://applicationinsights.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://applicationinsights.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", 
- "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-northeast-1.amazonaws.com" + "url": "https://applicationinsights.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-northeast-1.api.aws" + "url": "https://applicationinsights.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - 
"UseFIPS": false + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -775,542 +34,534 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-northeast-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.me-south-1.api.aws" + "url": "https://applicationinsights.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.me-south-1.amazonaws.com" + "url": "https://applicationinsights.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.me-south-1.api.aws" + "url": "https://applicationinsights.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://applicationinsights.me-south-1.amazonaws.com" + "url": "https://applicationinsights.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.sa-east-1.api.aws" + "url": "https://applicationinsights.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.sa-east-1.amazonaws.com" + "url": "https://applicationinsights.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.sa-east-1.api.aws" + "url": "https://applicationinsights.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://applicationinsights.sa-east-1.amazonaws.com" + "url": "https://applicationinsights.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-east-1.api.aws" + "url": "https://applicationinsights.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-east-1.amazonaws.com" + "url": "https://applicationinsights.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-east-1.api.aws" + "url": "https://applicationinsights.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-east-1.amazonaws.com" + "url": 
"https://applicationinsights.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://applicationinsights.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.cn-north-1.amazonaws.com.cn" + "url": "https://applicationinsights.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://applicationinsights.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.cn-north-1.amazonaws.com.cn" + "url": 
"https://applicationinsights.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-gov-west-1.api.aws" + "url": "https://applicationinsights.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-gov-west-1.amazonaws.com" + "url": "https://applicationinsights.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights.us-gov-west-1.api.aws" + "url": "https://applicationinsights-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.us-gov-west-1.amazonaws.com" + "url": 
"https://applicationinsights-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-southeast-1.api.aws" + "url": "https://applicationinsights.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-southeast-1.amazonaws.com" + "url": "https://applicationinsights.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-southeast-1.api.aws" + "url": "https://applicationinsights.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-southeast-1.amazonaws.com" + 
"url": "https://applicationinsights-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-southeast-2.api.aws" + "url": "https://applicationinsights-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.ap-southeast-2.amazonaws.com" + "url": "https://applicationinsights.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.ap-southeast-2.api.aws" + "url": "https://applicationinsights.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://applicationinsights.ap-southeast-2.amazonaws.com" + "url": "https://applicationinsights.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-east-1.api.aws" + "url": "https://applicationinsights-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-east-1.amazonaws.com" + "url": "https://applicationinsights-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://applicationinsights.us-east-1.api.aws" + "url": "https://applicationinsights.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": 
"https://applicationinsights.us-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.us-east-2.api.aws" + "url": "https://applicationinsights-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.us-east-2.api.aws" + "url": "https://applicationinsights.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - 
"endpoint": { - "url": "https://applicationinsights.us-east-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://applicationinsights-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://applicationinsights-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://applicationinsights.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and 
DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://applicationinsights.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1320,9 +571,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1332,11 +583,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/applicationinsights/src/main/resources/codegen-resources/paginators-1.json b/services/applicationinsights/src/main/resources/codegen-resources/paginators-1.json index 2f237e4c958..c9e1dab5611 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/paginators-1.json +++ b/services/applicationinsights/src/main/resources/codegen-resources/paginators-1.json @@ -29,6 +29,11 @@ "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" + }, + "ListWorkloads": { + "input_token": "NextToken", 
+ "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/services/applicationinsights/src/main/resources/codegen-resources/service-2.json b/services/applicationinsights/src/main/resources/codegen-resources/service-2.json index 00880d07607..7d1cbec399d 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/service-2.json +++ b/services/applicationinsights/src/main/resources/codegen-resources/service-2.json @@ -14,6 +14,22 @@ "uid":"application-insights-2018-11-25" }, "operations":{ + "AddWorkload":{ + "name":"AddWorkload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AddWorkloadRequest"}, + "output":{"shape":"AddWorkloadResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Adds a workload to a component. Each component can have at most five workloads.

" + }, "CreateApplication":{ "name":"CreateApplication", "http":{ @@ -231,6 +247,21 @@ ], "documentation":"

Describes the anomalies or errors associated with the problem.

" }, + "DescribeWorkload":{ + "name":"DescribeWorkload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWorkloadRequest"}, + "output":{"shape":"DescribeWorkloadResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Describes a workload and its configuration.

" + }, "ListApplications":{ "name":"ListApplications", "http":{ @@ -334,6 +365,36 @@ ], "documentation":"

Retrieve a list of the tags (keys and values) that are associated with a specified application. A tag is a label that you optionally define and associate with an application. Each tag consists of a required tag key and an optional associated tag value. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.

" }, + "ListWorkloads":{ + "name":"ListWorkloads", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWorkloadsRequest"}, + "output":{"shape":"ListWorkloadsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists the workloads that are configured on a given component.

" + }, + "RemoveWorkload":{ + "name":"RemoveWorkload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RemoveWorkloadRequest"}, + "output":{"shape":"RemoveWorkloadResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Remove workload from a component.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -424,6 +485,36 @@ {"shape":"InternalServerException"} ], "documentation":"

Adds a log pattern to a LogPatternSet.

" + }, + "UpdateProblem":{ + "name":"UpdateProblem", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateProblemRequest"}, + "output":{"shape":"UpdateProblemResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates the visibility of the problem or specifies the problem as RESOLVED.

" + }, + "UpdateWorkload":{ + "name":"UpdateWorkload", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateWorkloadRequest"}, + "output":{"shape":"UpdateWorkloadResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Adds a workload to a component. Each component can have at most five workloads.

" } }, "shapes":{ @@ -435,6 +526,47 @@ "documentation":"

User does not have permissions to perform this action.

", "exception":true }, + "AccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^\\d{12}$" + }, + "AddWorkloadRequest":{ + "type":"structure", + "required":[ + "ResourceGroupName", + "ComponentName", + "WorkloadConfiguration" + ], + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"

The name of the resource group.

" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"

The name of the component.

" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"

The configuration settings of the workload. The value is the escaped JSON of the configuration.

" + } + } + }, + "AddWorkloadResponse":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"

The ID of the workload.

" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"

The configuration settings of the workload. The value is the escaped JSON of the configuration.

" + } + } + }, "AffectedResource":{"type":"string"}, "AmazonResourceName":{ "type":"string", @@ -483,6 +615,10 @@ "ApplicationInfo":{ "type":"structure", "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the owner of the application.

" + }, "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"

The name of the resource group used for the application.

" @@ -564,6 +700,14 @@ "ConfigurationEvent":{ "type":"structure", "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"

The name of the resource group of the application to which the configuration event belongs.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the owner of the application to which the configuration event belongs.

" + }, "MonitoredResourceARN":{ "shape":"ConfigurationEventMonitoredResourceARN", "documentation":"

The resource monitored by Application Insights.

" @@ -811,6 +955,10 @@ "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"

The name of the resource group.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -842,6 +990,10 @@ "Tier":{ "shape":"Tier", "documentation":"

The tier of the application component.

" + }, + "RecommendationType":{ + "shape":"RecommendationType", + "documentation":"

The recommended configuration type.

" } } }, @@ -868,6 +1020,10 @@ "ComponentName":{ "shape":"ComponentName", "documentation":"

The name of the component.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -902,6 +1058,10 @@ "ComponentName":{ "shape":"ComponentName", "documentation":"

The name of the component.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -934,6 +1094,10 @@ "PatternName":{ "shape":"LogPatternName", "documentation":"

The name of the log pattern.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -944,6 +1108,10 @@ "shape":"ResourceGroupName", "documentation":"

The name of the resource group.

" }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" + }, "LogPattern":{ "shape":"LogPattern", "documentation":"

The successfully created log pattern.

" @@ -957,6 +1125,10 @@ "ObservationId":{ "shape":"ObservationId", "documentation":"

The ID of the observation.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -976,6 +1148,10 @@ "ProblemId":{ "shape":"ProblemId", "documentation":"

The ID of the problem.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -995,6 +1171,10 @@ "ProblemId":{ "shape":"ProblemId", "documentation":"

The ID of the problem.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the owner of the resource group affected by the problem.

" } } }, @@ -1007,6 +1187,49 @@ } } }, + "DescribeWorkloadRequest":{ + "type":"structure", + "required":[ + "ResourceGroupName", + "ComponentName", + "WorkloadId" + ], + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"

The name of the resource group.

" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"

The name of the component.

" + }, + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"

The ID of the workload.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the workload owner.

" + } + } + }, + "DescribeWorkloadResponse":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"

The ID of the workload.

" + }, + "WorkloadRemarks":{ + "shape":"Remarks", + "documentation":"

If logging is supported for the resource type, shows whether the component has configured logs to be monitored.

" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"

The configuration settings of the workload. The value is the escaped JSON of the configuration.

" + } + } + }, "DetectedWorkload":{ "type":"map", "key":{"shape":"Tier"}, @@ -1076,6 +1299,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"

The token to request the next page of results.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -1107,6 +1334,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"

The token to request the next page of results.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -1149,6 +1380,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"

The NextToken value returned from a previous paginated ListConfigurationHistory request where MaxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the NextToken value. This value is null when there are no more results to return.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -1180,6 +1415,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"

The token to request the next page of results.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -1190,6 +1429,10 @@ "shape":"ResourceGroupName", "documentation":"

The name of the resource group.

" }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" + }, "LogPatternSets":{ "shape":"LogPatternSetList", "documentation":"

The list of log pattern sets.

" @@ -1219,6 +1462,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"

The token to request the next page of results.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -1229,6 +1476,10 @@ "shape":"ResourceGroupName", "documentation":"

The name of the resource group.

" }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" + }, "LogPatterns":{ "shape":"LogPatternList", "documentation":"

The list of log patterns.

" @@ -1242,6 +1493,10 @@ "ListProblemsRequest":{ "type":"structure", "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" + }, "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"

The name of the resource group.

" @@ -1265,6 +1520,10 @@ "ComponentName":{ "shape":"ComponentName", "documentation":"

The name of the component.

" + }, + "Visibility":{ + "shape":"Visibility", + "documentation":"

Specifies whether or not you can view the problem. If not specified, visible and ignored problems are returned.

" } } }, @@ -1282,6 +1541,10 @@ "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"

The name of the resource group.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the resource group owner.

" } } }, @@ -1304,6 +1567,48 @@ } } }, + "ListWorkloadsRequest":{ + "type":"structure", + "required":[ + "ResourceGroupName", + "ComponentName" + ], + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"

The name of the resource group.

" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"

The name of the component.

" + }, + "MaxResults":{ + "shape":"MaxEntities", + "documentation":"

The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned NextToken value.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

The token to request the next page of results.

" + }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID of the owner of the workload.

" + } + } + }, + "ListWorkloadsResponse":{ + "type":"structure", + "members":{ + "WorkloadList":{ + "shape":"WorkloadList", + "documentation":"

The list of workloads.

" + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

The token to request the next page of results.

" + } + } + }, "LogFilter":{ "type":"string", "enum":[ @@ -1624,6 +1929,10 @@ "shape":"SeverityLevel", "documentation":"

A measure of the level of impact of the problem.

" }, + "AccountId":{ + "shape":"AccountId", + "documentation":"

The AWS account ID for the owner of the resource group affected by the problem.

" + }, "ResourceGroupName":{ "shape":"ResourceGroupName", "documentation":"

The name of the resource group affected by the problem.

" @@ -1639,6 +1948,14 @@ "LastRecurrenceTime":{ "shape":"LastRecurrenceTime", "documentation":"

The last time that the problem reoccurred after its last resolution.

" + }, + "Visibility":{ + "shape":"Visibility", + "documentation":"

Specifies whether or not you can view the problem. Updates to ignored problems do not generate notifications.

" + }, + "ResolutionMethod":{ + "shape":"ResolutionMethod", + "documentation":"

Specifies how the problem was resolved. If the value is AUTOMATIC, the system resolved the problem. If the value is MANUAL, the user resolved the problem. If the value is UNRESOLVED, then the problem is not resolved.

" } }, "documentation":"

Describes a problem that is detected by correlating observations.

" @@ -1655,6 +1972,14 @@ }, "RdsEventCategories":{"type":"string"}, "RdsEventMessage":{"type":"string"}, + "RecommendationType":{ + "type":"string", + "enum":[ + "INFRA_ONLY", + "WORKLOAD_ONLY", + "ALL" + ] + }, "RecurringCount":{"type":"long"}, "RelatedObservations":{ "type":"structure", @@ -1668,6 +1993,41 @@ }, "Remarks":{"type":"string"}, "RemoveSNSTopic":{"type":"boolean"}, + "RemoveWorkloadRequest":{ + "type":"structure", + "required":[ + "ResourceGroupName", + "ComponentName", + "WorkloadId" + ], + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"

The name of the resource group.

" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"

The name of the component.

" + }, + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"

The ID of the workload.

" + } + } + }, + "RemoveWorkloadResponse":{ + "type":"structure", + "members":{ + } + }, + "ResolutionMethod":{ + "type":"string", + "enum":[ + "MANUAL", + "AUTOMATIC", + "UNRESOLVED" + ] + }, "ResourceARN":{ "type":"string", "max":1011, @@ -1729,7 +2089,8 @@ "IGNORE", "RESOLVED", "PENDING", - "RECURRING" + "RECURRING", + "RECOVERING" ] }, "Tag":{ @@ -1824,7 +2185,10 @@ "SAP_HANA_HIGH_AVAILABILITY", "SQL_SERVER_FAILOVER_CLUSTER_INSTANCE", "SHAREPOINT", - "ACTIVE_DIRECTORY" + "ACTIVE_DIRECTORY", + "SAP_NETWEAVER_STANDARD", + "SAP_NETWEAVER_DISTRIBUTED", + "SAP_NETWEAVER_HIGH_AVAILABILITY" ], "max":50, "min":1 @@ -2015,6 +2379,72 @@ } } }, + "UpdateProblemRequest":{ + "type":"structure", + "required":["ProblemId"], + "members":{ + "ProblemId":{ + "shape":"ProblemId", + "documentation":"

The ID of the problem.

" + }, + "UpdateStatus":{ + "shape":"UpdateStatus", + "documentation":"

The status of the problem. Arguments can be passed for only problems that show a status of RECOVERING.

" + }, + "Visibility":{ + "shape":"Visibility", + "documentation":"

The visibility of a problem. When you pass a value of IGNORED, the problem is removed from the default view, and all notifications for the problem are suspended. When VISIBLE is passed, the IGNORED action is reversed.

" + } + } + }, + "UpdateProblemResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateStatus":{ + "type":"string", + "enum":["RESOLVED"] + }, + "UpdateWorkloadRequest":{ + "type":"structure", + "required":[ + "ResourceGroupName", + "ComponentName", + "WorkloadConfiguration" + ], + "members":{ + "ResourceGroupName":{ + "shape":"ResourceGroupName", + "documentation":"

The name of the resource group.

" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"

The name of the component.

" + }, + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"

The ID of the workload.

" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"

The configuration settings of the workload. The value is the escaped JSON of the configuration.

" + } + } + }, + "UpdateWorkloadResponse":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"

The ID of the workload.

" + }, + "WorkloadConfiguration":{ + "shape":"WorkloadConfiguration", + "documentation":"

The configuration settings of the workload. The value is the escaped JSON of the configuration.

" + } + } + }, "ValidationException":{ "type":"structure", "members":{ @@ -2024,11 +2454,78 @@ "exception":true }, "Value":{"type":"double"}, + "Visibility":{ + "type":"string", + "enum":[ + "IGNORED", + "VISIBLE" + ] + }, + "Workload":{ + "type":"structure", + "members":{ + "WorkloadId":{ + "shape":"WorkloadId", + "documentation":"

The ID of the workload.

" + }, + "ComponentName":{ + "shape":"ComponentName", + "documentation":"

The name of the component.

" + }, + "WorkloadName":{ + "shape":"WorkloadName", + "documentation":"

The name of the workload.

" + }, + "Tier":{ + "shape":"Tier", + "documentation":"

The tier of the workload.

" + }, + "WorkloadRemarks":{ + "shape":"Remarks", + "documentation":"

If logging is supported for the resource type, shows whether the component has configured logs to be monitored.

" + } + }, + "documentation":"

Describes the workloads on a component.

" + }, + "WorkloadConfiguration":{ + "type":"structure", + "members":{ + "WorkloadName":{ + "shape":"WorkloadName", + "documentation":"

The name of the workload.

" + }, + "Tier":{ + "shape":"Tier", + "documentation":"

The configuration of the workload tier.

" + }, + "Configuration":{ + "shape":"ComponentConfiguration", + "documentation":"

The configuration settings of the workload.

" + } + }, + "documentation":"

The configuration of the workload.

" + }, + "WorkloadId":{ + "type":"string", + "max":38, + "min":38, + "pattern":"w-[0-9a-fA-F]{8}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{12}" + }, + "WorkloadList":{ + "type":"list", + "member":{"shape":"Workload"} + }, "WorkloadMetaData":{ "type":"map", "key":{"shape":"MetaDataKey"}, "value":{"shape":"MetaDataValue"} }, + "WorkloadName":{ + "type":"string", + "max":8, + "min":1, + "pattern":"[a-zA-Z0-9\\.\\-_]*" + }, "XRayErrorPercent":{"type":"integer"}, "XRayFaultPercent":{"type":"integer"}, "XRayNodeName":{"type":"string"}, diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index 7ee52d5282f..e65154687d1 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/apprunner/pom.xml b/services/apprunner/pom.xml index a135fa84521..a4fc93a3c02 100644 --- a/services/apprunner/pom.xml +++ b/services/apprunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT apprunner AWS Java SDK :: Services :: App Runner diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index a3db17f005a..89cabb2780d 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index d3cd1fed837..bd59a5f2f4e 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT appsync diff --git a/services/arczonalshift/pom.xml b/services/arczonalshift/pom.xml index 780f1994e11..f34ff49ae46 100644 --- a/services/arczonalshift/pom.xml +++ b/services/arczonalshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT arczonalshift AWS Java SDK :: Services :: ARC Zonal Shift diff --git a/services/athena/pom.xml b/services/athena/pom.xml index bf7ca8237ce..e33cbf0286f 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index ef5be9a2632..ed2071e60a3 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index 41d40c92b68..da669963b87 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscaling/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/autoscaling/src/main/resources/codegen-resources/endpoint-rule-set.json index ca4fa951141..37ca93e9346 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/autoscaling/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": 
"Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://autoscaling-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one 
or both", - "type": "error" + "endpoint": { + "url": "https://autoscaling-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://autoscaling.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://autoscaling-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://autoscaling.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://autoscaling-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + 
"type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://autoscaling.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://autoscaling.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://autoscaling.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://autoscaling.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/autoscaling/src/main/resources/codegen-resources/paginators-1.json b/services/autoscaling/src/main/resources/codegen-resources/paginators-1.json 
index 550ec09c725..67362bd5118 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/paginators-1.json +++ b/services/autoscaling/src/main/resources/codegen-resources/paginators-1.json @@ -52,6 +52,12 @@ "input_token": "NextToken", "limit_key": "MaxRecords", "output_token": "NextToken" + }, + "DescribeWarmPool": { + "input_token": "NextToken", + "limit_key": "MaxRecords", + "output_token": "NextToken", + "result_key": "Instances" } } } \ No newline at end of file diff --git a/services/autoscaling/src/main/resources/codegen-resources/service-2.json b/services/autoscaling/src/main/resources/codegen-resources/service-2.json index 0d626df5856..bb8f13080be 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/autoscaling/src/main/resources/codegen-resources/service-2.json @@ -141,7 +141,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Completes the lifecycle action for the specified token or instance with the specified result.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.

  2. (Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when an instance is put into a wait state due to a lifecycle hook.

  3. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  4. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  5. If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state.

  6. If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.

For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Completes the lifecycle action for the specified token or instance with the specified result.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.

  2. (Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when an instance is put into a wait state due to a lifecycle hook.

  3. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  4. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  5. If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state.

  6. If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.

For more information, see Complete a lifecycle action in the Amazon EC2 Auto Scaling User Guide.

" }, "CreateAutoScalingGroup":{ "name":"CreateAutoScalingGroup", @@ -705,7 +705,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Detaches one or more traffic sources from the specified Auto Scaling group.

When you detach a taffic, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the traffic source using the DescribeTrafficSources API call. The instances continue to run.

" + "documentation":"

Detaches one or more traffic sources from the specified Auto Scaling group.

When you detach a traffic source, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the traffic source using the DescribeTrafficSources API call. The instances continue to run.

" }, "DisableMetricsCollection":{ "name":"DisableMetricsCollection", @@ -1228,6 +1228,20 @@ }, "documentation":"

Describes an alarm.

" }, + "AlarmList":{ + "type":"list", + "member":{"shape":"XmlStringMaxLen255"} + }, + "AlarmSpecification":{ + "type":"structure", + "members":{ + "Alarms":{ + "shape":"AlarmList", + "documentation":"

The names of one or more CloudWatch alarms to monitor for the instance refresh. You can specify up to 10 alarms.

" + } + }, + "documentation":"

Specifies the CloudWatch alarm specification to use in an instance refresh.

" + }, "Alarms":{ "type":"list", "member":{"shape":"Alarm"} @@ -1236,7 +1250,7 @@ "type":"string", "max":30, "min":1, - "pattern":"[a-zA-Z0-9\\.\\*]+" + "pattern":"[a-zA-Z0-9\\.\\*\\-]+" }, "AllowedInstanceTypes":{ "type":"list", @@ -2853,7 +2867,7 @@ "type":"string", "max":30, "min":1, - "pattern":"[a-zA-Z0-9\\.\\*]+" + "pattern":"[a-zA-Z0-9\\.\\*\\-]+" }, "ExcludedInstanceTypes":{ "type":"list", @@ -4543,7 +4557,7 @@ }, "ScalingAdjustment":{ "shape":"PolicyIncrement", - "documentation":"

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity. For exact capacity, you must specify a positive value.

Required if the policy type is SimpleScaling. (Not used with any other policy type.)

" + "documentation":"

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity. For exact capacity, you must specify a non-negative value.

Required if the policy type is SimpleScaling. (Not used with any other policy type.)

" }, "Cooldown":{ "shape":"Cooldown", @@ -4714,7 +4728,7 @@ }, "AutoRollback":{ "shape":"AutoRollback", - "documentation":"

(Optional) Indicates whether to roll back the Auto Scaling group to its previous configuration if the instance refresh fails. The default is false.

A rollback is not supported in the following situations:

  • There is no desired configuration specified for the instance refresh.

  • The Auto Scaling group has a launch template that uses an Amazon Web Services Systems Manager parameter instead of an AMI ID for the ImageId property.

  • The Auto Scaling group uses the launch template's $Latest or $Default version.

" + "documentation":"

(Optional) Indicates whether to roll back the Auto Scaling group to its previous configuration if the instance refresh fails or a CloudWatch alarm threshold is met. The default is false.

A rollback is not supported in the following situations:

  • There is no desired configuration specified for the instance refresh.

  • The Auto Scaling group has a launch template that uses an Amazon Web Services Systems Manager parameter instead of an AMI ID for the ImageId property.

  • The Auto Scaling group uses the launch template's $Latest or $Default version.

For more information, see Undo changes with a rollback in the Amazon EC2 Auto Scaling User Guide.

" }, "ScaleInProtectedInstances":{ "shape":"ScaleInProtectedInstances", @@ -4723,6 +4737,10 @@ "StandbyInstances":{ "shape":"StandbyInstances", "documentation":"

Choose the behavior that you want Amazon EC2 Auto Scaling to use if instances in Standby state are found.

The following lists the valid values:

Terminate

Amazon EC2 Auto Scaling terminates instances that are in Standby.

Ignore

Amazon EC2 Auto Scaling ignores instances that are in Standby and continues to replace instances that are in the InService state.

Wait (default)

Amazon EC2 Auto Scaling waits one hour for you to return the instances to service. Otherwise, the instance refresh will fail.

" + }, + "AlarmSpecification":{ + "shape":"AlarmSpecification", + "documentation":"

(Optional) The CloudWatch alarm specification. CloudWatch alarms can be used to identify any issues and fail the operation if an alarm threshold is met.

" } }, "documentation":"

Describes the preferences for an instance refresh.

" @@ -4808,6 +4826,7 @@ }, "RollbackInstanceRefreshType":{ "type":"structure", + "required":["AutoScalingGroupName"], "members":{ "AutoScalingGroupName":{ "shape":"XmlStringMaxLen255", @@ -5190,7 +5209,7 @@ }, "Preferences":{ "shape":"RefreshPreferences", - "documentation":"

Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum healthy percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following:

  • Auto rollback

  • Checkpoints

  • Skip matching

" + "documentation":"

Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum healthy percentage, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following:

  • Auto rollback

  • Checkpoints

  • CloudWatch alarms

  • Skip matching

" } } }, @@ -5208,7 +5227,7 @@ }, "ScalingAdjustment":{ "shape":"PolicyIncrement", - "documentation":"

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

The amount by which to scale. The adjustment is based on the value that you specified in the AdjustmentType property (either an absolute number or a percentage). A positive value adds to the current capacity and a negative number subtracts from the current capacity.

" + "documentation":"

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity. For exact capacity, you must specify a non-negative value.

" } }, "documentation":"

Describes information used to create a step adjustment for a step scaling policy.

For the following examples, suppose that you have an alarm with a breach threshold of 50:

  • To trigger the adjustment when the metric is greater than or equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of 10.

  • To trigger the adjustment when the metric is greater than 40 and less than or equal to 50, specify a lower bound of -10 and an upper bound of 0.

There are a few rules for the step adjustments for your step policy:

  • The ranges of your step adjustments can't overlap or have a gap.

  • At most, one step adjustment can have a null lower bound. If one step adjustment has a negative lower bound, then there must be a step adjustment with a null lower bound.

  • At most, one step adjustment can have a null upper bound. If one step adjustment has a positive upper bound, then there must be a step adjustment with a null upper bound.

  • The upper and lower bound can't be null in the same step adjustment.

For more information, see Step adjustments in the Amazon EC2 Auto Scaling User Guide.

" diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 014cde1dfa4..03292056a06 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 7b1a2f916e1..17f10c5b37b 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backup/src/main/resources/codegen-resources/paginators-1.json b/services/backup/src/main/resources/codegen-resources/paginators-1.json index 51a5dad7a78..7ad393927f0 100644 --- a/services/backup/src/main/resources/codegen-resources/paginators-1.json +++ b/services/backup/src/main/resources/codegen-resources/paginators-1.json @@ -59,6 +59,12 @@ "limit_key": "MaxResults", "result_key": "Results" }, + "ListProtectedResourcesByBackupVault": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Results" + }, "ListRecoveryPointsByBackupVault": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/backup/src/main/resources/codegen-resources/service-2.json b/services/backup/src/main/resources/codegen-resources/service-2.json index 567173a9d36..3e72248e09a 100644 --- a/services/backup/src/main/resources/codegen-resources/service-2.json +++ b/services/backup/src/main/resources/codegen-resources/service-2.json @@ -119,6 +119,25 @@ "documentation":"

This action creates a legal hold on a recovery point (backup). A legal hold is a restraint on altering or deleting a backup until an authorized user cancels the legal hold. Any actions to delete or disassociate a recovery point will fail with an error if one or more active legal holds are on the recovery point.

", "idempotent":true }, + "CreateLogicallyAirGappedBackupVault":{ + "name":"CreateLogicallyAirGappedBackupVault", + "http":{ + "method":"PUT", + "requestUri":"/logically-air-gapped-backup-vaults/{backupVaultName}" + }, + "input":{"shape":"CreateLogicallyAirGappedBackupVaultInput"}, + "output":{"shape":"CreateLogicallyAirGappedBackupVaultOutput"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

This request creates a logical container where backups are stored.

This request includes a name, optionally one or more resource tags, an encryption key, and a request ID.

Do not include sensitive data, such as passport numbers, in the name of a backup vault.

", + "idempotent":true + }, "CreateReportPlan":{ "name":"CreateReportPlan", "http":{ @@ -817,6 +836,21 @@ "documentation":"

Returns an array of resources successfully backed up by Backup, including the time the resource was saved, an Amazon Resource Name (ARN) of the resource, and a resource type.

", "idempotent":true }, + "ListProtectedResourcesByBackupVault":{ + "name":"ListProtectedResourcesByBackupVault", + "http":{ + "method":"GET", + "requestUri":"/backup-vaults/{backupVaultName}/resources/" + }, + "input":{"shape":"ListProtectedResourcesByBackupVaultInput"}, + "output":{"shape":"ListProtectedResourcesByBackupVaultOutput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

This request lists the protected resources corresponding to each backup vault.

" + }, "ListRecoveryPointsByBackupVault":{ "name":"ListRecoveryPointsByBackupVault", "http":{ @@ -1290,7 +1324,7 @@ }, "State":{ "shape":"BackupJobState", - "documentation":"

The current state of a resource recovery point.

" + "documentation":"

The current state of a backup job.

" }, "StatusMessage":{ "shape":"string", @@ -1571,7 +1605,7 @@ }, "StartWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors.

During the start window, the backup job status remains in CREATED status until it has successfully begun or until the start window time has run out. If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).

" + "documentation":"

A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, it must be at least 60 minutes to avoid errors.

This parameter has a maximum value of 100 years (52,560,000 minutes).

During the start window, the backup job status remains in CREATED status until it has successfully begun or until the start window time has run out. If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).

" }, "CompletionWindowMinutes":{ "shape":"WindowMinutes", @@ -1579,7 +1613,7 @@ }, "Lifecycle":{ "shape":"Lifecycle", - "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by resource table. Backup ignores this expression for other resource types.

" + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by resource table. Backup ignores this expression for other resource types.

This parameter has a maximum value of 100 years (36,500 days).

" }, "RecoveryPointTags":{ "shape":"Tags", @@ -2287,6 +2321,59 @@ } } }, + "CreateLogicallyAirGappedBackupVaultInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "MinRetentionDays", + "MaxRetentionDays" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

This is the name of the vault that is being created.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "BackupVaultTags":{ + "shape":"Tags", + "documentation":"

These are the tags that will be included in the newly-created vault.

" + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

This is the ID of the creation request.

" + }, + "MinRetentionDays":{ + "shape":"Long", + "documentation":"

This setting specifies the minimum retention period that the vault retains its recovery points. If this parameter is not specified, no minimum retention period is enforced.

If specified, any backup or copy job to the vault must have a lifecycle policy with a retention period equal to or longer than the minimum retention period. If a job retention period is shorter than that minimum retention period, then the vault fails the backup or copy job, and you should either modify your lifecycle settings or use a different vault.

" + }, + "MaxRetentionDays":{ + "shape":"Long", + "documentation":"

This is the setting that specifies the maximum retention period that the vault retains its recovery points. If this parameter is not specified, Backup does not enforce a maximum retention period on the recovery points in the vault (allowing indefinite storage).

If specified, any backup or copy job to the vault must have a lifecycle policy with a retention period equal to or shorter than the maximum retention period. If the job retention period is longer than that maximum retention period, then the vault fails the backup or copy job, and you should either modify your lifecycle settings or use a different vault.

" + } + } + }, + "CreateLogicallyAirGappedBackupVaultOutput":{ + "type":"structure", + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Logically air-gapped backup vaults are identified by names that are unique to the account used to create them and the Region where they are created. They consist of lowercase letters, numbers, and hyphens.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

This is the ARN (Amazon Resource Name) of the vault being created.

" + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time when the vault was created.

This value is in Unix format, Coordinated Universal Time (UTC), and accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "VaultState":{ + "shape":"VaultState", + "documentation":"

This is the current state of the vault.

" + } + } + }, "CreateReportPlanInput":{ "type":"structure", "required":[ @@ -2572,7 +2659,7 @@ }, "State":{ "shape":"BackupJobState", - "documentation":"

The current state of a resource recovery point.

" + "documentation":"

The current state of a backup job.

" }, "StatusMessage":{ "shape":"string", @@ -2649,6 +2736,12 @@ "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Amazon Web Services Region where they are created. They consist of lowercase letters, numbers, and hyphens.

", "location":"uri", "locationName":"backupVaultName" + }, + "BackupVaultAccountId":{ + "shape":"string", + "documentation":"

This is the account ID of the specified backup vault.

", + "location":"querystring", + "locationName":"backupVaultAccountId" } } }, @@ -2663,6 +2756,10 @@ "shape":"ARN", "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault.

" }, + "VaultType":{ + "shape":"VaultType", + "documentation":"

This is the type of vault described.

" + }, "EncryptionKeyArn":{ "shape":"ARN", "documentation":"

The server-side encryption key that is used to protect your backups; for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.

" @@ -2836,6 +2933,12 @@ "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", "location":"uri", "locationName":"recoveryPointArn" + }, + "BackupVaultAccountId":{ + "shape":"AccountId", + "documentation":"

This is the account ID of the specified backup vault.

", + "location":"querystring", + "locationName":"backupVaultAccountId" } } }, @@ -3483,6 +3586,12 @@ "documentation":"

An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", "location":"uri", "locationName":"recoveryPointArn" + }, + "BackupVaultAccountId":{ + "shape":"AccountId", + "documentation":"

This is the account ID of the specified backup vault.

", + "location":"querystring", + "locationName":"backupVaultAccountId" } } }, @@ -3884,6 +3993,18 @@ "ListBackupVaultsInput":{ "type":"structure", "members":{ + "ByVaultType":{ + "shape":"VaultType", + "documentation":"

This parameter will sort the list of vaults by vault type.

", + "location":"querystring", + "locationName":"vaultType" + }, + "ByShared":{ + "shape":"boolean", + "documentation":"

This parameter will sort the list of vaults by shared vaults.

", + "location":"querystring", + "locationName":"shared" + }, "NextToken":{ "shape":"string", "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", @@ -4065,6 +4186,49 @@ "type":"list", "member":{"shape":"Condition"} }, + "ListProtectedResourcesByBackupVaultInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

This is the list of protected resources by backup vault within the vault(s) you specify by name.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "BackupVaultAccountId":{ + "shape":"AccountId", + "documentation":"

This is the list of protected resources by backup vault within the vault(s) you specify by account ID.

", + "location":"querystring", + "locationName":"backupVaultAccountId" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListProtectedResourcesByBackupVaultOutput":{ + "type":"structure", + "members":{ + "Results":{ + "shape":"ProtectedResourcesList", + "documentation":"

These are the results returned for the request ListProtectedResourcesByBackupVault.

" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, "ListProtectedResourcesInput":{ "type":"structure", "members":{ @@ -4105,6 +4269,12 @@ "location":"uri", "locationName":"backupVaultName" }, + "BackupVaultAccountId":{ + "shape":"AccountId", + "documentation":"

This parameter will sort the list of recovery points by account ID.

", + "location":"querystring", + "locationName":"backupVaultAccountId" + }, "NextToken":{ "shape":"string", "documentation":"

The next item following a partial list of returned items. For example, if a request is made to return maxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", @@ -5106,15 +5276,15 @@ }, "StartWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 hours. If this value is included, it must be at least 60 minutes to avoid errors.

During the start window, the backup job status remains in CREATED status until it has successfully begun or until the start window time has run out. If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).

" + "documentation":"

A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 hours. If this value is included, it must be at least 60 minutes to avoid errors.

This parameter has a maximum value of 100 years (52,560,000 minutes).

During the start window, the backup job status remains in CREATED status until it has successfully begun or until the start window time has run out. If within the start window time Backup receives an error that allows the job to be retried, Backup will automatically retry to begin the job at least every 10 minutes until the backup successfully begins (the job status changes to RUNNING) or until the job status changes to EXPIRED (which is expected to occur when the start window time is over).

" }, "CompleteWindowMinutes":{ "shape":"WindowMinutes", - "documentation":"

A value in minutes during which a successfully started backup must complete, or else Backup will cancel the job. This value is optional. This value begins counting down from when the backup was scheduled. It does not add additional time for StartWindowMinutes, or if the backup started later than scheduled.

" + "documentation":"

A value in minutes during which a successfully started backup must complete, or else Backup will cancel the job. This value is optional. This value begins counting down from when the backup was scheduled. It does not add additional time for StartWindowMinutes, or if the backup started later than scheduled.

Like StartWindowMinutes, this parameter has a maximum value of 100 years (52,560,000 minutes).

" }, "Lifecycle":{ "shape":"Lifecycle", - "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by resource table. Backup ignores this expression for other resource types.

" + "documentation":"

The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. Backup will transition and expire backups automatically according to the lifecycle that you define.

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been transitioned to cold.

Resource types that are able to be transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the Feature availability by resource table. Backup ignores this expression for other resource types.

This parameter has a maximum value of 100 years (36,500 days).

" }, "RecoveryPointTags":{ "shape":"Tags", @@ -5539,6 +5709,21 @@ "type":"list", "member":{"shape":"string"} }, + "VaultState":{ + "type":"string", + "enum":[ + "CREATING", + "AVAILABLE", + "FAILED" + ] + }, + "VaultType":{ + "type":"string", + "enum":[ + "BACKUP_VAULT", + "LOGICALLY_AIR_GAPPED_BACKUP_VAULT" + ] + }, "WindowMinutes":{"type":"long"}, "boolean":{"type":"boolean"}, "integer":{"type":"integer"}, diff --git a/services/backupgateway/pom.xml b/services/backupgateway/pom.xml index 037c34f921c..cbae64ce942 100644 --- a/services/backupgateway/pom.xml +++ b/services/backupgateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT backupgateway AWS Java SDK :: Services :: Backup Gateway diff --git a/services/backupstorage/pom.xml b/services/backupstorage/pom.xml index 0626ab96eef..d1ea59048cd 100644 --- a/services/backupstorage/pom.xml +++ b/services/backupstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT backupstorage AWS Java SDK :: Services :: Backup Storage diff --git a/services/batch/pom.xml b/services/batch/pom.xml index 793cc3d8edf..5fea8f4a9f9 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/batch/src/main/resources/codegen-resources/service-2.json b/services/batch/src/main/resources/codegen-resources/service-2.json index 5294dfd22b4..af52be4a1d7 100644 --- a/services/batch/src/main/resources/codegen-resources/service-2.json +++ b/services/batch/src/main/resources/codegen-resources/service-2.json @@ -38,7 +38,7 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ], - "documentation":"

Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.

Multi-node parallel jobs aren't supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

To create a compute environment that uses EKS resources, the caller must have permissions to call eks:DescribeCluster.

Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.

In April 2022, Batch added enhanced support for updating compute environments. For more information, see Updating compute environments. To use the enhanced updating of compute environments to update AMIs, follow these rules:

  • Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role.

  • Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE or SPOT_CAPACITY_OPTIMIZED.

  • Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment.

  • Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest).

If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated up at the time of the infrastructure update, even if the launchTemplate wasn't updated.

" + "documentation":"

Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.

Multi-node parallel jobs aren't supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

To create a compute environment that uses EKS resources, the caller must have permissions to call eks:DescribeCluster.

Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is available. You're responsible for the management of the guest operating system. This includes any updates and security patches. You're also responsible for any additional application software or utilities that you install on the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.

In April 2022, Batch added enhanced support for updating compute environments. For more information, see Updating compute environments. To use the enhanced updating of compute environments to update AMIs, follow these rules:

  • Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role.

  • Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED.

  • Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment.

  • Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration ), or in the launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest).

If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be re-selected. If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated at the time of the infrastructure update, even if the launchTemplate wasn't updated.

" }, "CreateJobQueue":{ "name":"CreateJobQueue", @@ -498,7 +498,8 @@ "enum":[ "BEST_FIT", "BEST_FIT_PROGRESSIVE", - "SPOT_CAPACITY_OPTIMIZED" + "SPOT_CAPACITY_OPTIMIZED", + "SPOT_PRICE_CAPACITY_OPTIMIZED" ] }, "CRType":{ @@ -514,7 +515,8 @@ "type":"string", "enum":[ "BEST_FIT_PROGRESSIVE", - "SPOT_CAPACITY_OPTIMIZED" + "SPOT_CAPACITY_OPTIMIZED", + "SPOT_PRICE_CAPACITY_OPTIMIZED" ] }, "CancelJobRequest":{ @@ -659,7 +661,7 @@ }, "allocationStrategy":{ "shape":"CRAllocationStrategy", - "documentation":"

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

BEST_FIT (default)

Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.

BEST_FIT_PROGRESSIVE

Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

SPOT_CAPACITY_OPTIMIZED

Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" + "documentation":"

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

BEST_FIT (default)

Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.

BEST_FIT_PROGRESSIVE

Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

SPOT_CAPACITY_OPTIMIZED

Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

SPOT_PRICE_CAPACITY_OPTIMIZED

The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" }, "minvCpus":{ "shape":"Integer", @@ -667,7 +669,7 @@ }, "maxvCpus":{ "shape":"Integer", - "documentation":"

The maximum number of vCPUs that a compute environment can support.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

" + "documentation":"

The maximum number of vCPUs that a compute environment can support.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

" }, "desiredvCpus":{ "shape":"Integer", @@ -735,7 +737,7 @@ }, "maxvCpus":{ "shape":"Integer", - "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.

" + "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.

" }, "desiredvCpus":{ "shape":"Integer", @@ -751,7 +753,7 @@ }, "allocationStrategy":{ "shape":"CRUpdateAllocationStrategy", - "documentation":"

The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

BEST_FIT_PROGRESSIVE

Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

SPOT_CAPACITY_OPTIMIZED

Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" + "documentation":"

The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

BEST_FIT_PROGRESSIVE

Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

SPOT_CAPACITY_OPTIMIZED

Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

SPOT_PRICE_CAPACITY_OPTIMIZED

The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" }, "instanceTypes":{ "shape":"StringList", @@ -3035,7 +3037,7 @@ }, "cpuArchitecture":{ "shape":"String", - "documentation":"

The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.

This parameter must be set to X86_64 for Windows containers.

" + "documentation":"

The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.

This parameter must be set to X86_64 for Windows containers.

" } }, "documentation":"

An object that represents the compute environment architecture for Batch jobs on Fargate.

" diff --git a/services/billingconductor/pom.xml b/services/billingconductor/pom.xml index d5c52b26448..07e1650b12a 100644 --- a/services/billingconductor/pom.xml +++ b/services/billingconductor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT billingconductor AWS Java SDK :: Services :: Billingconductor diff --git a/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json index b4ea1f3843c..a71edcaf080 100644 --- a/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/billingconductor/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -138,208 +138,40 @@ }, "aws" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ 
- true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor-fips.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://billingconductor.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "name": "sigv4", + "signingName": "billingconductor", + "signingRegion": "us-east-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://billingconductor.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "billingconductor", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -543,33 +375,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": 
"https://billingconductor.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "billingconductor", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/services/billingconductor/src/main/resources/codegen-resources/endpoint-tests.json b/services/billingconductor/src/main/resources/codegen-resources/endpoint-tests.json index b9c1c6c3798..05fb6f36f46 100644 --- a/services/billingconductor/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/billingconductor/src/main/resources/codegen-resources/endpoint-tests.json @@ -18,8 +18,8 @@ }, "params": { "Region": "aws-global", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -31,8 +31,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -44,8 +44,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -57,8 +57,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -79,8 +79,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -92,8 +92,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -105,8 +105,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -118,8 +118,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -131,8 +131,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + 
"UseDualStack": false } }, { @@ -144,8 +144,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -157,8 +157,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -170,8 +170,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -183,8 +183,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -196,8 +207,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -209,8 +231,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -222,8 +255,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } 
+ }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -235,8 +279,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -248,8 +292,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -261,8 +305,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -273,8 +317,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -285,10 +329,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/billingconductor/src/main/resources/codegen-resources/service-2.json b/services/billingconductor/src/main/resources/codegen-resources/service-2.json index d51720264a1..87d16cf3955 100644 --- a/services/billingconductor/src/main/resources/codegen-resources/service-2.json +++ b/services/billingconductor/src/main/resources/codegen-resources/service-2.json @@ -625,22 +625,22 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

The associating array of account IDs.

" + "documentation":"

The associating array of account IDs.

" }, "BillingGroupArn":{ "shape":"BillingGroupArn", - "documentation":"

The Billing Group Arn that the linked account is associated to.

" + "documentation":"

The Billing Group Arn that the linked account is associated to.

" }, "AccountName":{ "shape":"AccountName", - "documentation":"

The Amazon Web Services account name.

" + "documentation":"

The Amazon Web Services account name.

" }, "AccountEmail":{ "shape":"AccountEmail", - "documentation":"

The Amazon Web Services account email.

" + "documentation":"

The Amazon Web Services account email.

" } }, - "documentation":"

A representation of a linked account.

" + "documentation":"

A representation of a linked account.

" }, "AccountEmail":{ "type":"string", @@ -652,10 +652,14 @@ "members":{ "LinkedAccountIds":{ "shape":"AccountIdList", - "documentation":"

The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.

" + "documentation":"

The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.

" + }, + "AutoAssociate":{ + "shape":"Boolean", + "documentation":"

Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.

" } }, - "documentation":"

The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated family.

" + "documentation":"

The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.

" }, "AccountId":{ "type":"string", @@ -740,14 +744,14 @@ "members":{ "Message":{ "shape":"String", - "documentation":"

The reason why the resource association failed.

" + "documentation":"

The reason why the resource association failed.

" }, "Reason":{ "shape":"AssociateResourceErrorReason", - "documentation":"

A static error code that's used to classify the type of failure.

" + "documentation":"

A static error code that's used to classify the type of failure.

" } }, - "documentation":"

A representation of a resource association error.

" + "documentation":"

A representation of a resource association error.

" }, "AssociateResourceErrorReason":{ "type":"string", @@ -764,14 +768,14 @@ "members":{ "Arn":{ "shape":"CustomLineItemAssociationElement", - "documentation":"

The resource ARN that was associated to the custom line item.

" + "documentation":"

The resource ARN that was associated to the custom line item.

" }, "Error":{ "shape":"AssociateResourceError", - "documentation":"

An AssociateResourceError that will populate if the resource association fails.

" + "documentation":"

An AssociateResourceError that will populate if the resource association fails.

" } }, - "documentation":"

A resource association result for a percentage custom line item.

" + "documentation":"

A resource association result for a percentage custom line item.

" }, "AssociateResourcesResponseList":{ "type":"list", @@ -862,30 +866,30 @@ "members":{ "Arn":{ "shape":"BillingGroupArn", - "documentation":"

The Amazon Resource Name (ARN) of a billing group.

" + "documentation":"

The Amazon Resource Name (ARN) of a billing group.

" }, "AWSCost":{ "shape":"AWSCost", - "documentation":"

The actual Amazon Web Services charges for the billing group.

" + "documentation":"

The actual Amazon Web Services charges for the billing group.

" }, "ProformaCost":{ "shape":"ProformaCost", - "documentation":"

The hypothetical Amazon Web Services charges based on the associated pricing plan of a billing group.

" + "documentation":"

The hypothetical Amazon Web Services charges based on the associated pricing plan of a billing group.

" }, "Margin":{ "shape":"Margin", - "documentation":"

The billing group margin.

" + "documentation":"

The billing group margin.

" }, "MarginPercentage":{ "shape":"MarginPercentage", - "documentation":"

The percentage of billing group margin.

" + "documentation":"

The percentage of billing group margin.

" }, "Currency":{ "shape":"Currency", - "documentation":"

The displayed currency.

" + "documentation":"

The displayed currency.

" } }, - "documentation":"

A summary report of actual Amazon Web Services charges and calculated Amazon Web Services charges, based on the associated pricing plan of a billing group.

" + "documentation":"

A summary report of actual Amazon Web Services charges and calculated Amazon Web Services charges, based on the associated pricing plan of a billing group.

" }, "BillingGroupCostReportList":{ "type":"list", @@ -910,43 +914,47 @@ "members":{ "Name":{ "shape":"BillingGroupName", - "documentation":"

The name of the billing group.

" + "documentation":"

The name of the billing group.

" }, "Arn":{ "shape":"BillingGroupArn", - "documentation":"

The Amazon Resource Number (ARN) that can be used to uniquely identify the billing group.

" + "documentation":"

The Amazon Resource Number (ARN) that can be used to uniquely identify the billing group.

" }, "Description":{ "shape":"BillingGroupDescription", - "documentation":"

The description of the billing group.

" + "documentation":"

The description of the billing group.

" }, "PrimaryAccountId":{ "shape":"AccountId", - "documentation":"

The account ID that serves as the main account in a billing group.

" + "documentation":"

The account ID that serves as the main account in a billing group.

" }, "ComputationPreference":{"shape":"ComputationPreference"}, "Size":{ "shape":"NumberOfAccounts", - "documentation":"

The number of accounts in the particular billing group.

" + "documentation":"

The number of accounts in the particular billing group.

" }, "CreationTime":{ "shape":"Instant", - "documentation":"

The time when the billing group was created.

" + "documentation":"

The time when the billing group was created.

" }, "LastModifiedTime":{ "shape":"Instant", - "documentation":"

The most recent time when the billing group was modified.

" + "documentation":"

The most recent time when the billing group was modified.

" }, "Status":{ "shape":"BillingGroupStatus", - "documentation":"

The billing group status. Only one of the valid values can be used.

" + "documentation":"

The billing group status. Only one of the valid values can be used.

" }, "StatusReason":{ "shape":"BillingGroupStatusReason", - "documentation":"

The reason why the billing group is in its current status.

" + "documentation":"

The reason why the billing group is in its current status.

" + }, + "AccountGrouping":{ + "shape":"ListBillingGroupAccountGrouping", + "documentation":"

Specifies if the billing group has automatic account association (AutoAssociate) enabled.

" } }, - "documentation":"

A representation of a billing group.

" + "documentation":"

A representation of a billing group.

" }, "BillingGroupName":{ "type":"string", @@ -973,6 +981,10 @@ "type":"string", "pattern":"\\d{4}-(0?[1-9]|1[012])" }, + "Boolean":{ + "type":"boolean", + "box":true + }, "ClientToken":{ "type":"string", "max":64, @@ -988,7 +1000,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the pricing plan that's used to compute the Amazon Web Services charges for a billing group.

" } }, - "documentation":"

The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group.

" + "documentation":"

The preferences and settings that will be used to compute the Amazon Web Services charges for a billing group.

" }, "ConflictException":{ "type":"structure", @@ -1050,7 +1062,7 @@ }, "AccountGrouping":{ "shape":"AccountGrouping", - "documentation":"

The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated family.

" + "documentation":"

The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.

" }, "ComputationPreference":{ "shape":"ComputationPreference", @@ -1306,14 +1318,14 @@ "members":{ "InclusiveStartBillingPeriod":{ "shape":"BillingPeriod", - "documentation":"

The inclusive start billing period that defines a billing period range where a custom line is applied.

" + "documentation":"

The inclusive start billing period that defines a billing period range where a custom line is applied.

" }, "ExclusiveEndBillingPeriod":{ "shape":"BillingPeriod", - "documentation":"

The inclusive end billing period that defines a billing period range where a custom line is applied.

" + "documentation":"

The exclusive end billing period that defines a billing period range where a custom line is applied.

" } }, - "documentation":"

The billing period range in which the custom line item request will be applied.

" + "documentation":"

The billing period range in which the custom line item request will be applied.

" }, "CustomLineItemChargeDetails":{ "type":"structure", @@ -1321,18 +1333,18 @@ "members":{ "Flat":{ "shape":"CustomLineItemFlatChargeDetails", - "documentation":"

A CustomLineItemFlatChargeDetails that describes the charge details of a flat custom line item.

" + "documentation":"

A CustomLineItemFlatChargeDetails that describes the charge details of a flat custom line item.

" }, "Percentage":{ "shape":"CustomLineItemPercentageChargeDetails", - "documentation":"

A CustomLineItemPercentageChargeDetails that describes the charge details of a percentage custom line item.

" + "documentation":"

A CustomLineItemPercentageChargeDetails that describes the charge details of a percentage custom line item.

" }, "Type":{ "shape":"CustomLineItemType", - "documentation":"

The type of the custom line item that indicates whether the charge is a fee or credit.

" + "documentation":"

The type of the custom line item that indicates whether the charge is a fee or credit.

" } }, - "documentation":"

The charge details of a custom line item. It should contain only one of Flat or Percentage.

" + "documentation":"

The charge details of a custom line item. It should contain only one of Flat or Percentage.

" }, "CustomLineItemChargeValue":{ "type":"double", @@ -1352,10 +1364,10 @@ "members":{ "ChargeValue":{ "shape":"CustomLineItemChargeValue", - "documentation":"

The custom line item's fixed charge value in USD.

" + "documentation":"

The custom line item's fixed charge value in USD.

" } }, - "documentation":"

A representation of the charge details that are associated with a flat custom line item.

" + "documentation":"

A representation of the charge details that are associated with a flat custom line item.

" }, "CustomLineItemList":{ "type":"list", @@ -1366,46 +1378,46 @@ "members":{ "Arn":{ "shape":"CustomLineItemArn", - "documentation":"

The Amazon Resource Names (ARNs) for custom line items.

" + "documentation":"

The Amazon Resource Names (ARNs) for custom line items.

" }, "Name":{ "shape":"CustomLineItemName", - "documentation":"

The custom line item's name.

" + "documentation":"

The custom line item's name.

" }, "ChargeDetails":{ "shape":"ListCustomLineItemChargeDetails", - "documentation":"

A ListCustomLineItemChargeDetails that describes the charge details of a custom line item.

" + "documentation":"

A ListCustomLineItemChargeDetails that describes the charge details of a custom line item.

" }, "CurrencyCode":{ "shape":"CurrencyCode", - "documentation":"

The custom line item's charge value currency. Only one of the valid values can be used.

" + "documentation":"

The custom line item's charge value currency. Only one of the valid values can be used.

" }, "Description":{ "shape":"CustomLineItemDescription", - "documentation":"

The custom line item's description. This is shown on the Bills page in association with the charge value.

" + "documentation":"

The custom line item's description. This is shown on the Bills page in association with the charge value.

" }, "ProductCode":{ "shape":"CustomLineItemProductCode", - "documentation":"

The product code that's associated with the custom line item.

" + "documentation":"

The product code that's associated with the custom line item.

" }, "BillingGroupArn":{ "shape":"BillingGroupArn", - "documentation":"

The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.

" + "documentation":"

The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.

" }, "CreationTime":{ "shape":"Instant", - "documentation":"

The time created.

" + "documentation":"

The time created.

" }, "LastModifiedTime":{ "shape":"Instant", - "documentation":"

The most recent time when the custom line item was modified.

" + "documentation":"

The most recent time when the custom line item was modified.

" }, "AssociationSize":{ "shape":"NumberOfAssociations", - "documentation":"

The number of resources that are associated to the custom line item.

" + "documentation":"

The number of resources that are associated to the custom line item.

" } }, - "documentation":"

A representation of a custom line item.

" + "documentation":"

A representation of a custom line item.

" }, "CustomLineItemName":{ "type":"string", @@ -1426,14 +1438,14 @@ "members":{ "PercentageValue":{ "shape":"CustomLineItemPercentageChargeValue", - "documentation":"

The custom line item's percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.

" + "documentation":"

The custom line item's percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.

" }, "AssociatedValues":{ "shape":"CustomLineItemAssociationsList", - "documentation":"

A list of resource ARNs to associate to the percentage custom line item.

" + "documentation":"

A list of resource ARNs to associate to the percentage custom line item.

" } }, - "documentation":"

A representation of the charge details that are associated with a percentage custom line item.

" + "documentation":"

A representation of the charge details that are associated with a percentage custom line item.

" }, "CustomLineItemPercentageChargeValue":{ "type":"double", @@ -1653,14 +1665,14 @@ "members":{ "Arn":{ "shape":"CustomLineItemAssociationElement", - "documentation":"

The resource ARN that was disassociated from the custom line item.

" + "documentation":"

The resource ARN that was disassociated from the custom line item.

" }, "Error":{ "shape":"AssociateResourceError", "documentation":"

An AssociateResourceError that's shown if the resource disassociation fails.

" } }, - "documentation":"

A resource disassociation result for a percentage custom line item.

" + "documentation":"

A resource disassociation result for a percentage custom line item.

" }, "DisassociateResourcesResponseList":{ "type":"list", @@ -1700,18 +1712,18 @@ "members":{ "Association":{ "shape":"Association", - "documentation":"

MONITORED: linked accounts that are associated to billing groups.

UNMONITORED: linked accounts that are not associated to billing groups.

Billing Group Arn: linked accounts that are associated to the provided Billing Group Arn.

" + "documentation":"

MONITORED: linked accounts that are associated to billing groups.

UNMONITORED: linked accounts that are not associated to billing groups.

Billing Group Arn: linked accounts that are associated to the provided Billing Group Arn.

" }, "AccountId":{ "shape":"AccountId", - "documentation":"

The Amazon Web Services account ID to filter on.

" + "documentation":"

The Amazon Web Services account ID to filter on.

" }, "AccountIds":{ "shape":"AccountIdFilterList", "documentation":"

The list of Amazon Web Services IDs to retrieve their associated billing group for a given time range.

" } }, - "documentation":"

The filter on the account ID of the linked account, or any of the following:

MONITORED: linked accounts that are associated to billing groups.

UNMONITORED: linked accounts that are not associated to billing groups.

Billing Group Arn: linked accounts that are associated to the provided Billing Group Arn.

" + "documentation":"

The filter on the account ID of the linked account, or any of the following:

MONITORED: linked accounts that are associated to billing groups.

UNMONITORED: linked accounts that are not associated to billing groups.

Billing Group Arn: linked accounts that are associated to the provided Billing Group Arn.

" }, "ListAccountAssociationsInput":{ "type":"structure", @@ -1743,6 +1755,16 @@ } } }, + "ListBillingGroupAccountGrouping":{ + "type":"structure", + "members":{ + "AutoAssociate":{ + "shape":"Boolean", + "documentation":"

Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.

" + } + }, + "documentation":"

Specifies if the billing group has the following features enabled.

" + }, "ListBillingGroupCostReportsFilter":{ "type":"structure", "members":{ @@ -1751,7 +1773,7 @@ "documentation":"

The list of Amazon Resource Names (ARNs) used to filter billing groups to retrieve reports.

" } }, - "documentation":"

The filter used to retrieve specific BillingGroupCostReportElements.

" + "documentation":"

The filter used to retrieve specific BillingGroupCostReportElements.

" }, "ListBillingGroupCostReportsInput":{ "type":"structure", @@ -1792,18 +1814,22 @@ "members":{ "Arns":{ "shape":"BillingGroupArnList", - "documentation":"

The list of billing group Amazon Resource Names (ARNs) to retrieve information.

" + "documentation":"

The list of billing group Amazon Resource Names (ARNs) to retrieve information.

" }, "PricingPlan":{ "shape":"PricingPlanFullArn", - "documentation":"

The pricing plan Amazon Resource Names (ARNs) to retrieve information.

" + "documentation":"

The pricing plan Amazon Resource Names (ARNs) to retrieve information.

" }, "Statuses":{ "shape":"BillingGroupStatusList", "documentation":"

A list of billing groups to retrieve their current status for a specific time range

" + }, + "AutoAssociate":{ + "shape":"Boolean", + "documentation":"

Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.

" } }, - "documentation":"

The filter that specifies the billing groups and pricing plans to retrieve billing group information.

" + "documentation":"

The filter that specifies the billing groups and pricing plans to retrieve billing group information.

" }, "ListBillingGroupsInput":{ "type":"structure", @@ -1944,18 +1970,18 @@ "members":{ "Names":{ "shape":"CustomLineItemNameList", - "documentation":"

A list of custom line items to retrieve information.

" + "documentation":"

A list of custom line items to retrieve information.

" }, "BillingGroups":{ "shape":"BillingGroupArnList", - "documentation":"

The billing group Amazon Resource Names (ARNs) to retrieve information.

" + "documentation":"

The billing group Amazon Resource Names (ARNs) to retrieve information.

" }, "Arns":{ "shape":"CustomLineItemArns", - "documentation":"

A list of custom line item ARNs to retrieve information.

" + "documentation":"

A list of custom line item ARNs to retrieve information.

" } }, - "documentation":"

A filter that specifies the custom line items and billing groups to retrieve FFLI information.

" + "documentation":"

A filter that specifies the custom line items and billing groups to retrieve FFLI information.

" }, "ListCustomLineItemsInput":{ "type":"structure", @@ -2039,10 +2065,10 @@ "members":{ "Arns":{ "shape":"PricingPlanArns", - "documentation":"

A list of pricing plan Amazon Resource Names (ARNs) to retrieve information.

" + "documentation":"

A list of pricing plan Amazon Resource Names (ARNs) to retrieve information.

" } }, - "documentation":"

The filter that specifies the Amazon Resource Names (ARNs) of pricing plans, to retrieve pricing plan information.

" + "documentation":"

The filter that specifies the Amazon Resource Names (ARNs) of pricing plans, to retrieve pricing plan information.

" }, "ListPricingPlansInput":{ "type":"structure", @@ -2130,10 +2156,10 @@ "members":{ "Arns":{ "shape":"PricingRuleArns", - "documentation":"

A list containing the pricing rule Amazon Resource Names (ARNs) to include in the API response.

" + "documentation":"

A list containing the pricing rule Amazon Resource Names (ARNs) to include in the API response.

" } }, - "documentation":"

The filter that specifies criteria that the pricing rules returned by the ListPricingRules API will adhere to.

" + "documentation":"

The filter that specifies criteria that the pricing rules returned by the ListPricingRules API will adhere to.

" }, "ListPricingRulesInput":{ "type":"structure", @@ -2351,30 +2377,30 @@ "members":{ "Name":{ "shape":"PricingPlanName", - "documentation":"

The name of a pricing plan.

" + "documentation":"

The name of a pricing plan.

" }, "Arn":{ "shape":"PricingPlanArn", - "documentation":"

The pricing plan Amazon Resource Names (ARN). This can be used to uniquely identify a pricing plan.

" + "documentation":"

The pricing plan Amazon Resource Names (ARN). This can be used to uniquely identify a pricing plan.

" }, "Description":{ "shape":"PricingPlanDescription", - "documentation":"

The pricing plan description.

" + "documentation":"

The pricing plan description.

" }, "Size":{ "shape":"NumberOfAssociatedPricingRules", - "documentation":"

The pricing rules count that's currently associated with this pricing plan list element.

" + "documentation":"

The pricing rules count that's currently associated with this pricing plan list element.

" }, "CreationTime":{ "shape":"Instant", - "documentation":"

The time when the pricing plan was created.

" + "documentation":"

The time when the pricing plan was created.

" }, "LastModifiedTime":{ "shape":"Instant", - "documentation":"

The most recent time when the pricing plan was modified.

" + "documentation":"

The most recent time when the pricing plan was modified.

" } }, - "documentation":"

A representation of a pricing plan.

" + "documentation":"

A representation of a pricing plan.

" }, "PricingPlanName":{ "type":"string", @@ -2420,43 +2446,43 @@ "members":{ "Name":{ "shape":"PricingRuleName", - "documentation":"

The name of a pricing rule.

" + "documentation":"

The name of a pricing rule.

" }, "Arn":{ "shape":"PricingRuleArn", - "documentation":"

The Amazon Resource Name (ARN) used to uniquely identify a pricing rule.

" + "documentation":"

The Amazon Resource Name (ARN) used to uniquely identify a pricing rule.

" }, "Description":{ "shape":"PricingRuleDescription", - "documentation":"

The pricing rule description.

" + "documentation":"

The pricing rule description.

" }, "Scope":{ "shape":"PricingRuleScope", - "documentation":"

The scope of pricing rule that indicates if it is globally applicable, or if it is service-specific.

" + "documentation":"

The scope of pricing rule that indicates if it is globally applicable, or if it is service-specific.

" }, "Type":{ "shape":"PricingRuleType", - "documentation":"

The type of pricing rule.

" + "documentation":"

The type of pricing rule.

" }, "ModifierPercentage":{ "shape":"ModifierPercentage", - "documentation":"

A percentage modifier applied on the public pricing rates.

" + "documentation":"

A percentage modifier applied on the public pricing rates.

" }, "Service":{ "shape":"Service", - "documentation":"

If the Scope attribute is SERVICE, this attribute indicates which service the PricingRule is applicable for.

" + "documentation":"

If the Scope attribute is SERVICE, this attribute indicates which service the PricingRule is applicable for.

" }, "AssociatedPricingPlanCount":{ "shape":"NumberOfPricingPlansAssociatedWith", - "documentation":"

The pricing plans count that this pricing rule is associated with.

" + "documentation":"

The pricing plans count that this pricing rule is associated with.

" }, "CreationTime":{ "shape":"Instant", - "documentation":"

The time when the pricing rule was created.

" + "documentation":"

The time when the pricing rule was created.

" }, "LastModifiedTime":{ "shape":"Instant", - "documentation":"

The most recent time when the pricing rule was modified.

" + "documentation":"

The most recent time when the pricing rule was modified.

" }, "BillingEntity":{ "shape":"BillingEntity", @@ -2475,7 +2501,7 @@ "documentation":"

Operation is the specific Amazon Web Services action covered by this line item. This describes the specific usage of the line item.

If the Scope attribute is set to SKU, this attribute indicates which operation the PricingRule is modifying. For example, a value of RunInstances:0202 indicates the operation of running an Amazon EC2 instance.

" } }, - "documentation":"

A representation of a pricing rule.

" + "documentation":"

A representation of a pricing rule.

" }, "PricingRuleName":{ "type":"string", @@ -2676,6 +2702,16 @@ "members":{ } }, + "UpdateBillingGroupAccountGrouping":{ + "type":"structure", + "members":{ + "AutoAssociate":{ + "shape":"Boolean", + "documentation":"

Specifies if this billing group will automatically associate newly added Amazon Web Services accounts that join your consolidated billing family.

" + } + }, + "documentation":"

Specifies if the billing group has the following features enabled.

" + }, "UpdateBillingGroupInput":{ "type":"structure", "required":["Arn"], @@ -2699,6 +2735,10 @@ "Description":{ "shape":"BillingGroupDescription", "documentation":"

A description of the billing group.

" + }, + "AccountGrouping":{ + "shape":"UpdateBillingGroupAccountGrouping", + "documentation":"

Specifies if the billing group has automatic account association (AutoAssociate) enabled.

" } } }, @@ -2740,6 +2780,10 @@ "StatusReason":{ "shape":"BillingGroupStatusReason", "documentation":"

The reason why the billing group is in its current status.

" + }, + "AccountGrouping":{ + "shape":"UpdateBillingGroupAccountGrouping", + "documentation":"

Specifies if the billing group has automatic account association (AutoAssociate) enabled.

" } } }, @@ -3007,7 +3051,7 @@ "documentation":"

The fields that caused the error, if applicable.

" } }, - "documentation":"

The input doesn't match with the constraints specified by Amazon Web Services services.

", + "documentation":"

The input doesn't match with the constraints specified by Amazon Web Services.

", "error":{ "httpStatusCode":400, "senderFault":true @@ -3023,14 +3067,14 @@ "members":{ "Name":{ "shape":"String", - "documentation":"

The field name.

" + "documentation":"

The field name.

" }, "Message":{ "shape":"String", - "documentation":"

The message describing why the field failed validation.

" + "documentation":"

The message describing why the field failed validation.

" } }, - "documentation":"

The field's information of a request that resulted in an exception.

" + "documentation":"

The field's information of a request that resulted in an exception.

" }, "ValidationExceptionFieldList":{ "type":"list", @@ -3095,7 +3139,9 @@ "ILLEGAL_OPERATION", "ILLEGAL_USAGE_TYPE", "INVALID_SKU_COMBO", - "INVALID_FILTER" + "INVALID_FILTER", + "TOO_MANY_AUTO_ASSOCIATE_BILLING_GROUPS", + "CANNOT_DELETE_AUTO_ASSOCIATE_BILLING_GROUP" ] } }, diff --git a/services/braket/pom.xml b/services/braket/pom.xml index 1f1b12e9add..a6f0a8a5239 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 42cdef447ad..1d242abad21 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/budgets/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/budgets/src/main/resources/codegen-resources/endpoint-rule-set.json index 437b6b8a01a..c7e70d266ca 100644 --- a/services/budgets/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/budgets/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, 
+ "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "parseURL", + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -68,81 +115,27 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "ref": "UseDualStack" + "ref": "PartitionResult" }, - true + "name" ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" }, - "name" + "aws" ] }, - "aws" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ { "ref": "UseFIPS" }, - true + false ] }, { @@ -151,232 +144,76 @@ { "ref": "UseDualStack" }, - true + false ] } ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://budgets.amazonaws.com", + "properties": { + "authSchemes": 
[ { - "conditions": [], - "endpoint": { - "url": "https://budgets-fips.{Region}.api.aws", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "budgets" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-east-1" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://budgets-fips.{Region}.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "budgets" - } - ] + "ref": "PartitionResult" }, - "headers": {} - }, - "type": "endpoint" - } + "name" + ] + }, + "aws-cn" ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ { "fn": "booleanEquals", "argv": [ { - "ref": "UseDualStack" + "ref": "UseFIPS" }, - true + false ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [], - "endpoint": { - "url": "https://budgets.{Region}.api.aws", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "budgets" 
- } - ] - }, - "headers": {} - }, - "type": "endpoint" - } + "ref": "UseDualStack" + }, + false ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "endpoint": { - "url": "https://budgets.amazonaws.com", + "url": "https://budgets.amazonaws.com.cn", "properties": { "authSchemes": [ { "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "budgets" + "signingName": "budgets", + "signingRegion": "cn-northwest-1" } ] }, "headers": {} }, "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [ { @@ -438,16 +275,8 @@ { "conditions": [], "endpoint": { - "url": "https://budgets-fips.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "budgets" - } - ] - }, + "url": "https://budgets-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" @@ -498,16 +327,8 @@ { "conditions": [], "endpoint": { - "url": "https://budgets-fips.{Region}.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "budgets" - } - ] - }, + "url": "https://budgets-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" @@ -558,16 +379,8 @@ { "conditions": [], "endpoint": { - "url": "https://budgets.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "budgets" - } - ] - }, + "url": "https://budgets.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" @@ 
-581,269 +394,6 @@ } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://budgets.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "budgets" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://budgets-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://budgets-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - 
{ - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://budgets.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://budgets.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "budgets" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - ], - "endpoint": { - "url": "https://budgets.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "budgets" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { @@ -856,6 +406,11 @@ ] } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/budgets/src/main/resources/codegen-resources/endpoint-tests.json b/services/budgets/src/main/resources/codegen-resources/endpoint-tests.json index 150927daf24..073c32f8f15 100644 --- a/services/budgets/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/budgets/src/main/resources/codegen-resources/endpoint-tests.json @@ 
-1,5 +1,88 @@ { "testCases": [ + { + "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://budgets.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://budgets-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://budgets-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://budgets.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://budgets.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", "expect": { @@ -17,13 +100,52 @@ } }, "params": { - "UseDualStack": false, "Region": "aws-cn-global", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For region 
cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://budgets-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://budgets-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://budgets.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { @@ -31,30 +153,191 @@ { "name": "sigv4", "signingName": "budgets", - "signingRegion": "us-east-1" + "signingRegion": "cn-northwest-1" } ] }, - "url": "https://budgets.amazonaws.com" + "url": "https://budgets.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "aws-global", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://budgets-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://budgets-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack 
enabled", + "expect": { + "endpoint": { + "url": "https://budgets.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://budgets.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://budgets-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://budgets.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, 
+ { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://budgets-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://budgets.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -64,9 +347,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -76,11 +359,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + 
"error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/budgets/src/main/resources/codegen-resources/service-2.json b/services/budgets/src/main/resources/codegen-resources/service-2.json index ca7cbd03356..e516db9b4d4 100644 --- a/services/budgets/src/main/resources/codegen-resources/service-2.json +++ b/services/budgets/src/main/resources/codegen-resources/service-2.json @@ -643,7 +643,7 @@ "members":{ "BudgetName":{ "shape":"BudgetName", - "documentation":"

The name of a budget. The name must be unique within an account. The : and \\ characters aren't allowed in BudgetName.

" + "documentation":"

The name of a budget. The name must be unique within an account. The : and \\ characters, and the \"/action/\" substring, aren't allowed in BudgetName.

" }, "BudgetLimit":{ "shape":"Spend", @@ -690,10 +690,10 @@ }, "BudgetName":{ "type":"string", - "documentation":"

A string that represents the budget name. The \":\" and \"\\\" characters aren't allowed.

", + "documentation":"

A string that represents the budget name. The \":\" and \"\\\" characters, and the \"/action/\" substring, aren't allowed.

", "max":100, "min":1, - "pattern":"[^:\\\\]+" + "pattern":"^(?![^:\\\\]*/action/)[^:\\\\]+$" }, "BudgetNotificationsForAccount":{ "type":"structure", @@ -1908,7 +1908,7 @@ "members":{ "Message":{"shape":"errorMessage"} }, - "documentation":"

The number of API requests has exceeded the maximum allowed API request throttling limit for the account.

", + "documentation":"

The number of API requests has exceeded the maximum allowed API request throttling limit for the account.

", "exception":true }, "TimePeriod":{ diff --git a/services/chime/pom.xml b/services/chime/pom.xml index 7824a40297b..07fbf715749 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chimesdkidentity/pom.xml b/services/chimesdkidentity/pom.xml index 1b0b021971f..69830834d09 100644 --- a/services/chimesdkidentity/pom.xml +++ b/services/chimesdkidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT chimesdkidentity AWS Java SDK :: Services :: Chime SDK Identity diff --git a/services/chimesdkmediapipelines/pom.xml b/services/chimesdkmediapipelines/pom.xml index 5a2afca4240..7f2dc6752c3 100644 --- a/services/chimesdkmediapipelines/pom.xml +++ b/services/chimesdkmediapipelines/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT chimesdkmediapipelines AWS Java SDK :: Services :: Chime SDK Media Pipelines diff --git a/services/chimesdkmediapipelines/src/main/resources/codegen-resources/service-2.json b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/service-2.json index 13764920c16..5a045f8a262 100644 --- a/services/chimesdkmediapipelines/src/main/resources/codegen-resources/service-2.json +++ b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/service-2.json @@ -394,6 +394,25 @@ } }, "shapes":{ + "ActiveSpeakerOnlyConfiguration":{ + "type":"structure", + "members":{ + "ActiveSpeakerPosition":{ + "shape":"ActiveSpeakerPosition", + "documentation":"

The position of the ActiveSpeakerOnly video tile.

" + } + }, + "documentation":"

Defines the configuration for an ActiveSpeakerOnly video tile.

" + }, + "ActiveSpeakerPosition":{ + "type":"string", + "enum":[ + "TopLeft", + "TopRight", + "BottomLeft", + "BottomRight" + ] + }, "AmazonResourceName":{ "type":"string", "max":1011, @@ -461,7 +480,6 @@ }, "AmazonTranscribeProcessorConfiguration":{ "type":"structure", - "required":["LanguageCode"], "members":{ "LanguageCode":{ "shape":"CallAnalyticsLanguageCode", @@ -510,6 +528,26 @@ "FilterPartialResults":{ "shape":"Boolean", "documentation":"

If true, TranscriptEvents with IsPartial: true are filtered out of the insights target.

" + }, + "IdentifyLanguage":{ + "shape":"Boolean", + "documentation":"

Turns language identification on or off.

" + }, + "LanguageOptions":{ + "shape":"LanguageOptions", + "documentation":"

The language options for the transcription, such as automatic language detection.

" + }, + "PreferredLanguage":{ + "shape":"CallAnalyticsLanguageCode", + "documentation":"

The preferred language for the transcription.

" + }, + "VocabularyNames":{ + "shape":"VocabularyNames", + "documentation":"

The names of the custom vocabulary or vocabularies used during transcription.

" + }, + "VocabularyFilterNames":{ + "shape":"VocabularyFilterNames", + "documentation":"

The names of the custom vocabulary filter or filters used during transcription.

" } }, "documentation":"

A structure that contains the configuration settings for an Amazon Transcribe processor.

" @@ -670,6 +708,22 @@ "exception":true }, "Boolean":{"type":"boolean"}, + "BorderColor":{ + "type":"string", + "enum":[ + "Black", + "Blue", + "Red", + "Green", + "White", + "Yellow" + ] + }, + "BorderThickness":{ + "type":"integer", + "max":20, + "min":1 + }, "CallAnalyticsLanguageCode":{ "type":"string", "enum":[ @@ -684,6 +738,13 @@ "pt-BR" ] }, + "CanvasOrientation":{ + "type":"string", + "enum":[ + "Landscape", + "Portrait" + ] + }, "CategoryName":{ "type":"string", "max":200, @@ -922,13 +983,19 @@ "enum":[ "PresenterOnly", "Horizontal", - "Vertical" + "Vertical", + "ActiveSpeakerOnly" ] }, "ContentType":{ "type":"string", "enum":["PII"] }, + "CornerRadius":{ + "type":"integer", + "max":20, + "min":1 + }, "CreateMediaCapturePipelineRequest":{ "type":"structure", "required":[ @@ -1079,7 +1146,7 @@ }, "S3RecordingSinkRuntimeConfiguration":{ "shape":"S3RecordingSinkRuntimeConfiguration", - "documentation":"

The runtime configuration for the S3 recording sink.

" + "documentation":"

The runtime configuration for the S3 recording sink. If specified, the settings in this structure override any settings in S3RecordingSinkConfiguration.

" }, "Tags":{ "shape":"TagList", @@ -1326,6 +1393,26 @@ "PresenterOnlyConfiguration":{ "shape":"PresenterOnlyConfiguration", "documentation":"

Defines the configuration options for a presenter only video tile.

" + }, + "ActiveSpeakerOnlyConfiguration":{ + "shape":"ActiveSpeakerOnlyConfiguration", + "documentation":"

The configuration settings for an ActiveSpeakerOnly video tile.

" + }, + "HorizontalLayoutConfiguration":{ + "shape":"HorizontalLayoutConfiguration", + "documentation":"

The configuration settings for a horizontal layout.

" + }, + "VerticalLayoutConfiguration":{ + "shape":"VerticalLayoutConfiguration", + "documentation":"

The configuration settings for a vertical layout.

" + }, + "VideoAttribute":{ + "shape":"VideoAttribute", + "documentation":"

The attribute settings for the video tiles.

" + }, + "CanvasOrientation":{ + "shape":"CanvasOrientation", + "documentation":"

The orientation setting, horizontal or vertical.

" } }, "documentation":"

Specifies the type of grid layout.

" @@ -1336,6 +1423,46 @@ "min":36, "pattern":"[a-fA-F0-9]{8}(?:-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}" }, + "HighlightColor":{ + "type":"string", + "enum":[ + "Black", + "Blue", + "Red", + "Green", + "White", + "Yellow" + ] + }, + "HorizontalLayoutConfiguration":{ + "type":"structure", + "members":{ + "TileOrder":{ + "shape":"TileOrder", + "documentation":"

Sets the automatic ordering of the video tiles.

" + }, + "TilePosition":{ + "shape":"HorizontalTilePosition", + "documentation":"

Sets the position of horizontal tiles.

" + }, + "TileCount":{ + "shape":"TileCount", + "documentation":"

The maximum number of video tiles to display.

" + }, + "TileAspectRatio":{ + "shape":"TileAspectRatio", + "documentation":"

Sets the aspect ratio of the video tiles, such as 16:9.

" + } + }, + "documentation":"

Defines the configuration settings for the horizontal layout.

" + }, + "HorizontalTilePosition":{ + "type":"string", + "enum":[ + "Top", + "Bottom" + ] + }, "Iso8601Timestamp":{ "type":"timestamp", "timestampFormat":"iso8601" @@ -1452,6 +1579,12 @@ }, "documentation":"

A structure that contains the configuration settings for an AWS Lambda function's data sink.

" }, + "LanguageOptions":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[a-zA-Z-,]+" + }, "LayoutOption":{ "type":"string", "enum":["GridView"] @@ -2540,6 +2673,22 @@ "error":{"httpStatusCode":429}, "exception":true }, + "TileAspectRatio":{ + "type":"string", + "pattern":"^\\d{1,2}\\/\\d{1,2}$" + }, + "TileCount":{ + "type":"integer", + "max":10, + "min":1 + }, + "TileOrder":{ + "type":"string", + "enum":[ + "JoinSequence", + "SpeakerSequence" + ] + }, "Timestamp":{"type":"timestamp"}, "TimestampRange":{ "type":"structure", @@ -2662,6 +2811,35 @@ } } }, + "VerticalLayoutConfiguration":{ + "type":"structure", + "members":{ + "TileOrder":{ + "shape":"TileOrder", + "documentation":"

Sets the automatic ordering of the video tiles.

" + }, + "TilePosition":{ + "shape":"VerticalTilePosition", + "documentation":"

Sets the position of vertical tiles.

" + }, + "TileCount":{ + "shape":"TileCount", + "documentation":"

The maximum number of tiles to display.

" + }, + "TileAspectRatio":{ + "shape":"TileAspectRatio", + "documentation":"

Sets the aspect ratio of the video tiles, such as 16:9.

" + } + }, + "documentation":"

Defines the configuration settings for a vertical layout.

" + }, + "VerticalTilePosition":{ + "type":"string", + "enum":[ + "Left", + "Right" + ] + }, "VideoArtifactsConfiguration":{ "type":"structure", "required":["State"], @@ -2677,6 +2855,28 @@ }, "documentation":"

The video artifact configuration object.

" }, + "VideoAttribute":{ + "type":"structure", + "members":{ + "CornerRadius":{ + "shape":"CornerRadius", + "documentation":"

Sets the corner radius of all video tiles.

" + }, + "BorderColor":{ + "shape":"BorderColor", + "documentation":"

Defines the border color of all video tiles.

" + }, + "HighlightColor":{ + "shape":"HighlightColor", + "documentation":"

Defines the highlight color for the active video tile.

" + }, + "BorderThickness":{ + "shape":"BorderThickness", + "documentation":"

Defines the border thickness for all video tiles.

" + } + }, + "documentation":"

Defines the settings for a video tile.

" + }, "VideoConcatenationConfiguration":{ "type":"structure", "required":["State"], @@ -2706,12 +2906,24 @@ "min":1, "pattern":"^[0-9a-zA-Z._-]+" }, + "VocabularyFilterNames":{ + "type":"string", + "max":3000, + "min":1, + "pattern":"^[a-zA-Z0-9,-._]+" + }, "VocabularyName":{ "type":"string", "max":200, "min":1, "pattern":"^[0-9a-zA-Z._-]+" }, + "VocabularyNames":{ + "type":"string", + "max":3000, + "min":1, + "pattern":"^[a-zA-Z0-9,-._]+" + }, "VoiceAnalyticsConfigurationStatus":{ "type":"string", "enum":[ diff --git a/services/chimesdkmeetings/pom.xml b/services/chimesdkmeetings/pom.xml index b0ec8fa26e2..2a490374a84 100644 --- a/services/chimesdkmeetings/pom.xml +++ b/services/chimesdkmeetings/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT chimesdkmeetings AWS Java SDK :: Services :: Chime SDK Meetings diff --git a/services/chimesdkmessaging/pom.xml b/services/chimesdkmessaging/pom.xml index c0e210fac03..f4558557dfc 100644 --- a/services/chimesdkmessaging/pom.xml +++ b/services/chimesdkmessaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT chimesdkmessaging AWS Java SDK :: Services :: Chime SDK Messaging diff --git a/services/chimesdkvoice/pom.xml b/services/chimesdkvoice/pom.xml index 6c81e89235c..f006831979b 100644 --- a/services/chimesdkvoice/pom.xml +++ b/services/chimesdkvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT chimesdkvoice AWS Java SDK :: Services :: Chime SDK Voice diff --git a/services/chimesdkvoice/src/main/resources/codegen-resources/endpoint-tests.json b/services/chimesdkvoice/src/main/resources/codegen-resources/endpoint-tests.json index efca5932bf4..9150af50207 100644 --- a/services/chimesdkvoice/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/chimesdkvoice/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "Region": 
"us-gov-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-east-1" } }, { @@ -21,9 +21,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-gov-east-1" } }, { @@ -34,9 +34,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-east-1" } }, { @@ -47,9 +47,9 @@ } }, "params": { - "Region": "us-gov-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-gov-east-1" } }, { @@ -60,9 +60,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "cn-north-1" } }, { @@ -73,9 +73,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "cn-north-1" } }, { @@ -86,9 +86,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-north-1" } }, { @@ -99,9 +99,9 @@ } }, "params": { - "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "cn-north-1" } }, { @@ -110,9 +110,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-east-1" } }, { @@ -123,9 +123,9 @@ } }, "params": { - "Region": "us-iso-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-iso-east-1" } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-iso-east-1" } }, { @@ -147,9 +147,9 @@ } }, "params": { - "Region": "us-iso-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": 
"us-iso-east-1" } }, { @@ -160,9 +160,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ -173,9 +173,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ -186,9 +186,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -199,9 +199,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-isob-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-isob-east-1" } }, { @@ -223,9 +223,9 @@ } }, "params": { - "Region": "us-isob-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-isob-east-1" } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-isob-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-isob-east-1" } }, { @@ -247,9 +247,9 @@ } }, "params": { - "Region": "us-isob-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-isob-east-1" } }, { @@ -260,9 +260,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", "UseDualStack": false, "UseFIPS": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": 
"us-east-1", "UseDualStack": true, "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } } diff --git a/services/chimesdkvoice/src/main/resources/codegen-resources/service-2.json b/services/chimesdkvoice/src/main/resources/codegen-resources/service-2.json index b94a9295a12..7347ef39db0 100644 --- a/services/chimesdkvoice/src/main/resources/codegen-resources/service-2.json +++ b/services/chimesdkvoice/src/main/resources/codegen-resources/service-2.json @@ -2228,6 +2228,10 @@ "E164PhoneNumbers":{ "shape":"E164PhoneNumberList", "documentation":"

List of phone numbers, in E.164 format.

" + }, + "Name":{ + "shape":"PhoneNumberName", + "documentation":"

Specifies the name assigned to one or more phone numbers.

" } } }, @@ -4024,6 +4028,10 @@ "OrderId":{ "shape":"GuidString", "documentation":"

The phone number's order ID.

" + }, + "Name":{ + "shape":"PhoneNumberName", + "documentation":"

The name of the phone number.

" } }, "documentation":"

A phone number used to call an Amazon Chime SDK Voice Connector.

" @@ -4137,6 +4145,13 @@ "max":500, "min":1 }, + "PhoneNumberName":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^$|^[a-zA-Z0-9\\,\\.\\_\\-]+(\\s+[a-zA-Z0-9\\,\\.\\_\\-]+)*$", + "sensitive":true + }, "PhoneNumberOrder":{ "type":"structure", "members":{ @@ -5342,6 +5357,10 @@ "CallingName":{ "shape":"CallingName", "documentation":"

The outbound calling name associated with the phone number.

" + }, + "Name":{ + "shape":"PhoneNumberName", + "documentation":"

Specifies the name assigned to one or more phone numbers.

" } } }, @@ -5360,6 +5379,10 @@ "CallingName":{ "shape":"CallingName", "documentation":"

The outbound calling name to update.

" + }, + "Name":{ + "shape":"PhoneNumberName", + "documentation":"

The name of the phone number.

" } }, "documentation":"

The phone number ID, product type, or calling name fields to update, used with the BatchUpdatePhoneNumber and UpdatePhoneNumber actions.

" diff --git a/services/cleanrooms/pom.xml b/services/cleanrooms/pom.xml index a4af911ccb5..18dafc6f301 100644 --- a/services/cleanrooms/pom.xml +++ b/services/cleanrooms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cleanrooms AWS Java SDK :: Services :: Clean Rooms diff --git a/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json b/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json index 256ebd8e2db..a128e3bf3db 100644 --- a/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cleanrooms/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -110,9 +110,9 
@@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -123,9 +123,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -134,9 +134,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -147,9 +147,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -160,9 +160,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -173,9 +173,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -186,9 +186,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -199,9 +199,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": 
"us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -247,9 +247,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -260,9 +260,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -272,9 +272,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -284,9 +284,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } } diff --git a/services/cleanrooms/src/main/resources/codegen-resources/paginators-1.json b/services/cleanrooms/src/main/resources/codegen-resources/paginators-1.json index 52d02290f00..62fee6a7230 100644 --- a/services/cleanrooms/src/main/resources/codegen-resources/paginators-1.json +++ b/services/cleanrooms/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,17 @@ { "pagination": { + "ListAnalysisTemplates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "analysisTemplateSummaries" + }, + "ListCollaborationAnalysisTemplates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "collaborationAnalysisTemplateSummaries" + }, "ListCollaborations": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/cleanrooms/src/main/resources/codegen-resources/service-2.json b/services/cleanrooms/src/main/resources/codegen-resources/service-2.json index 6c2c346e492..5560200444a 100644 --- 
a/services/cleanrooms/src/main/resources/codegen-resources/service-2.json +++ b/services/cleanrooms/src/main/resources/codegen-resources/service-2.json @@ -12,6 +12,24 @@ "uid":"cleanrooms-2022-02-17" }, "operations":{ + "BatchGetCollaborationAnalysisTemplate":{ + "name":"BatchGetCollaborationAnalysisTemplate", + "http":{ + "method":"POST", + "requestUri":"/collaborations/{collaborationIdentifier}/batch-analysistemplates", + "responseCode":200 + }, + "input":{"shape":"BatchGetCollaborationAnalysisTemplateInput"}, + "output":{"shape":"BatchGetCollaborationAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves multiple analysis templates within a collaboration by their Amazon Resource Names (ARNs).

" + }, "BatchGetSchema":{ "name":"BatchGetSchema", "http":{ @@ -30,6 +48,26 @@ ], "documentation":"

Retrieves multiple schemas by their identifiers.

" }, + "CreateAnalysisTemplate":{ + "name":"CreateAnalysisTemplate", + "http":{ + "method":"POST", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates", + "responseCode":200 + }, + "input":{"shape":"CreateAnalysisTemplateInput"}, + "output":{"shape":"CreateAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a new analysis template.

" + }, "CreateCollaboration":{ "name":"CreateCollaboration", "http":{ @@ -129,6 +167,25 @@ ], "documentation":"

Creates a membership for a specific collaboration identifier and joins the collaboration.

" }, + "DeleteAnalysisTemplate":{ + "name":"DeleteAnalysisTemplate", + "http":{ + "method":"DELETE", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates/{analysisTemplateIdentifier}", + "responseCode":204 + }, + "input":{"shape":"DeleteAnalysisTemplateInput"}, + "output":{"shape":"DeleteAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes an analysis template.

", + "idempotent":true + }, "DeleteCollaboration":{ "name":"DeleteCollaboration", "http":{ @@ -247,6 +304,24 @@ "documentation":"

Deletes a specified membership. All resources under a membership must be deleted.

", "idempotent":true }, + "GetAnalysisTemplate":{ + "name":"GetAnalysisTemplate", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates/{analysisTemplateIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetAnalysisTemplateInput"}, + "output":{"shape":"GetAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves an analysis template.

" + }, "GetCollaboration":{ "name":"GetCollaboration", "http":{ @@ -264,6 +339,24 @@ ], "documentation":"

Returns metadata about a collaboration.

" }, + "GetCollaborationAnalysisTemplate":{ + "name":"GetCollaborationAnalysisTemplate", + "http":{ + "method":"GET", + "requestUri":"/collaborations/{collaborationIdentifier}/analysistemplates/{analysisTemplateArn}", + "responseCode":200 + }, + "input":{"shape":"GetCollaborationAnalysisTemplateInput"}, + "output":{"shape":"GetCollaborationAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves an analysis template within a collaboration.

" + }, "GetConfiguredTable":{ "name":"GetConfiguredTable", "http":{ @@ -390,6 +483,42 @@ ], "documentation":"

Retrieves a schema analysis rule.

" }, + "ListAnalysisTemplates":{ + "name":"ListAnalysisTemplates", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates", + "responseCode":200 + }, + "input":{"shape":"ListAnalysisTemplatesInput"}, + "output":{"shape":"ListAnalysisTemplatesOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists analysis templates that the caller owns.

" + }, + "ListCollaborationAnalysisTemplates":{ + "name":"ListCollaborationAnalysisTemplates", + "http":{ + "method":"GET", + "requestUri":"/collaborations/{collaborationIdentifier}/analysistemplates", + "responseCode":200 + }, + "input":{"shape":"ListCollaborationAnalysisTemplatesInput"}, + "output":{"shape":"ListCollaborationAnalysisTemplatesOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists analysis templates within a collaboration.

" + }, "ListCollaborations":{ "name":"ListCollaborations", "http":{ @@ -577,6 +706,24 @@ ], "documentation":"

Removes a tag or list of tags from a resource.

" }, + "UpdateAnalysisTemplate":{ + "name":"UpdateAnalysisTemplate", + "http":{ + "method":"PATCH", + "requestUri":"/memberships/{membershipIdentifier}/analysistemplates/{analysisTemplateIdentifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateAnalysisTemplateInput"}, + "output":{"shape":"UpdateAnalysisTemplateOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Updates the analysis template metadata.

" + }, "UpdateCollaboration":{ "name":"UpdateCollaboration", "http":{ @@ -795,10 +942,43 @@ "max":100, "min":1 }, + "AnalysisFormat":{ + "type":"string", + "enum":["SQL"] + }, "AnalysisMethod":{ "type":"string", "enum":["DIRECT_QUERY"] }, + "AnalysisParameter":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{ + "shape":"ParameterName", + "documentation":"

The name of the parameter. The name must use only alphanumeric, underscore (_), or hyphen (-) characters but cannot start or end with a hyphen.

" + }, + "type":{ + "shape":"ParameterType", + "documentation":"

The type of parameter.

" + }, + "defaultValue":{ + "shape":"ParameterValue", + "documentation":"

Optional. The default value that is applied in the analysis template. The member who can query can override this value in the query editor.

" + } + }, + "documentation":"

Optional. The member who can query can provide this placeholder for a literal data value in an analysis template.

", + "sensitive":true + }, + "AnalysisParameterList":{ + "type":"list", + "member":{"shape":"AnalysisParameter"}, + "max":10, + "min":0 + }, "AnalysisRule":{ "type":"structure", "required":[ @@ -816,7 +996,7 @@ }, "type":{ "shape":"AnalysisRuleType", - "documentation":"

The type of analysis rule. Valid values are `AGGREGATION` and `LIST`.

" + "documentation":"

The type of analysis rule.

" }, "name":{ "shape":"TableAlias", @@ -876,7 +1056,7 @@ "documentation":"

Columns that must meet a specific threshold value (after an aggregation function is applied to it) for each output row to be returned.

" } }, - "documentation":"

Enables query structure and specified queries that produce aggregate statistics.

" + "documentation":"

A type of analysis rule that enables query structure and specified queries that produce aggregate statistics.

" }, "AnalysisRuleAggregationAggregateColumnsList":{ "type":"list", @@ -893,6 +1073,31 @@ "min":1, "pattern":"[a-z0-9_](([a-z0-9_ ]+-)*([a-z0-9_ ]+))?" }, + "AnalysisRuleCustom":{ + "type":"structure", + "required":["allowedAnalyses"], + "members":{ + "allowedAnalyses":{ + "shape":"AnalysisRuleCustomAllowedAnalysesList", + "documentation":"

The analysis templates that are allowed by the custom analysis rule.

" + }, + "allowedAnalysisProviders":{ + "shape":"AnalysisRuleCustomAllowedAnalysisProvidersList", + "documentation":"

The Amazon Web Services accounts that are allowed to query by the custom analysis rule. Required when allowedAnalyses is ANY_QUERY.

" + } + }, + "documentation":"

A type of analysis rule that enables the table owner to approve custom SQL queries on their configured tables.

" + }, + "AnalysisRuleCustomAllowedAnalysesList":{ + "type":"list", + "member":{"shape":"AnalysisTemplateArnOrQueryWildcard"}, + "min":0 + }, + "AnalysisRuleCustomAllowedAnalysisProvidersList":{ + "type":"list", + "member":{"shape":"AccountId"}, + "min":0 + }, "AnalysisRuleList":{ "type":"structure", "required":[ @@ -906,7 +1111,7 @@ }, "allowedJoinOperators":{ "shape":"JoinOperatorsList", - "documentation":"

Which logical operators (if any) are to be used in an INNER JOIN match condition. Default is AND.

" + "documentation":"

The logical operators (if any) that are to be used in an INNER JOIN match condition. Default is AND.

" }, "listColumns":{ "shape":"AnalysisRuleColumnList", @@ -925,10 +1130,10 @@ "members":{ "v1":{ "shape":"AnalysisRulePolicyV1", - "documentation":"

Controls on the query specifications that can be run on configured table..

" + "documentation":"

Controls on the query specifications that can be run on configured table.

" } }, - "documentation":"

Controls on the query specifications that can be run on configured table..

", + "documentation":"

Controls on the query specifications that can be run on configured table.

", "union":true }, "AnalysisRulePolicyV1":{ @@ -941,22 +1146,280 @@ "aggregation":{ "shape":"AnalysisRuleAggregation", "documentation":"

Analysis rule type that enables only aggregation queries on a configured table.

" + }, + "custom":{ + "shape":"AnalysisRuleCustom", + "documentation":"

Analysis rule type that enables custom SQL queries on a configured table.

" } }, - "documentation":"

Controls on the query specifications that can be run on configured table..

", + "documentation":"

Controls on the query specifications that can be run on configured table.

", "union":true }, "AnalysisRuleType":{ "type":"string", "enum":[ "AGGREGATION", - "LIST" + "LIST", + "CUSTOM" ] }, "AnalysisRuleTypeList":{ "type":"list", "member":{"shape":"AnalysisRuleType"} }, + "AnalysisSchema":{ + "type":"structure", + "members":{ + "referencedTables":{ + "shape":"QueryTables", + "documentation":"

The tables referenced in the analysis schema.

" + } + }, + "documentation":"

A relation within an analysis.

" + }, + "AnalysisSource":{ + "type":"structure", + "members":{ + "text":{ + "shape":"AnalysisTemplateText", + "documentation":"

The query text.

" + } + }, + "documentation":"

The structure that defines the body of the analysis template.

", + "sensitive":true, + "union":true + }, + "AnalysisTemplate":{ + "type":"structure", + "required":[ + "id", + "arn", + "collaborationId", + "collaborationArn", + "membershipId", + "membershipArn", + "name", + "createTime", + "updateTime", + "schema", + "format", + "source" + ], + "members":{ + "id":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"

The identifier for the analysis template.

" + }, + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"

The Amazon Resource Name (ARN) of the analysis template.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

The unique ID for the associated collaboration of the analysis template.

" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The unique ARN for the analysis template’s associated collaboration.

" + }, + "membershipId":{ + "shape":"UUID", + "documentation":"

The identifier of a member who created the analysis template.

" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"

The Amazon Resource Name (ARN) of the member who created the analysis template.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the analysis template.

" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"

The name of the analysis template.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time that the analysis template was created.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The time that the analysis template was last updated.

" + }, + "schema":{ + "shape":"AnalysisSchema", + "documentation":"

The entire schema object.

" + }, + "format":{ + "shape":"AnalysisFormat", + "documentation":"

The format of the analysis template.

" + }, + "source":{ + "shape":"AnalysisSource", + "documentation":"

The source of the analysis template.

" + }, + "analysisParameters":{ + "shape":"AnalysisParameterList", + "documentation":"

The parameters of the analysis template.

" + } + }, + "documentation":"

The analysis template.

" + }, + "AnalysisTemplateArn":{ + "type":"string", + "max":200, + "min":0, + "pattern":"arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/analysistemplate/[\\d\\w-]+" + }, + "AnalysisTemplateArnList":{ + "type":"list", + "member":{"shape":"AnalysisTemplateArn"}, + "max":10, + "min":1 + }, + "AnalysisTemplateArnOrQueryWildcard":{ + "type":"string", + "max":200, + "min":0, + "pattern":"(ANY_QUERY|arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/analysistemplate/[\\d\\w-]+)" + }, + "AnalysisTemplateIdentifier":{ + "type":"string", + "max":36, + "min":36, + "pattern":".*[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}.*" + }, + "AnalysisTemplateSummary":{ + "type":"structure", + "required":[ + "arn", + "createTime", + "id", + "name", + "updateTime", + "membershipArn", + "membershipId", + "collaborationArn", + "collaborationId" + ], + "members":{ + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"

The Amazon Resource Name (ARN) of the analysis template.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time that the analysis template summary was created.

" + }, + "id":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"

The identifier of the analysis template.

" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"

The name of the analysis template.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The time that the analysis template summary was last updated.

" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"

The Amazon Resource Name (ARN) of the member who created the analysis template.

" + }, + "membershipId":{ + "shape":"UUID", + "documentation":"

The identifier for a membership resource.

" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The unique ARN for the analysis template summary’s associated collaboration.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

A unique identifier for the collaboration that the analysis template summary belongs to. Currently accepts collaboration ID.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the analysis template.

" + } + }, + "documentation":"

The metadata of the analysis template.

" + }, + "AnalysisTemplateSummaryList":{ + "type":"list", + "member":{"shape":"AnalysisTemplateSummary"} + }, + "AnalysisTemplateText":{ + "type":"string", + "max":15000, + "min":0 + }, + "BatchGetCollaborationAnalysisTemplateError":{ + "type":"structure", + "required":[ + "arn", + "code", + "message" + ], + "members":{ + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"

The Amazon Resource Name (ARN) of the analysis template.

" + }, + "code":{ + "shape":"String", + "documentation":"

An error code for the error.

" + }, + "message":{ + "shape":"String", + "documentation":"

A description of why the call failed.

" + } + }, + "documentation":"

Details of errors thrown by the call to retrieve multiple analysis templates within a collaboration by their identifiers.

" + }, + "BatchGetCollaborationAnalysisTemplateErrorList":{ + "type":"list", + "member":{"shape":"BatchGetCollaborationAnalysisTemplateError"}, + "max":10, + "min":0 + }, + "BatchGetCollaborationAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "collaborationIdentifier", + "analysisTemplateArns" + ], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"

A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.

", + "location":"uri", + "locationName":"collaborationIdentifier" + }, + "analysisTemplateArns":{ + "shape":"AnalysisTemplateArnList", + "documentation":"

The Amazon Resource Name (ARN) associated with the analysis template within a collaboration.

" + } + } + }, + "BatchGetCollaborationAnalysisTemplateOutput":{ + "type":"structure", + "required":[ + "collaborationAnalysisTemplates", + "errors" + ], + "members":{ + "collaborationAnalysisTemplates":{ + "shape":"CollaborationAnalysisTemplateList", + "documentation":"

The retrieved list of analysis templates within a collaboration.

" + }, + "errors":{ + "shape":"BatchGetCollaborationAnalysisTemplateErrorList", + "documentation":"

Error reasons for collaboration analysis templates that could not be retrieved. One error is returned for every collaboration analysis template that could not be retrieved.

" + } + } + }, "BatchGetSchemaError":{ "type":"structure", "required":[ @@ -1066,40 +1529,173 @@ "shape":"AccountId", "documentation":"

The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.

" }, - "creatorDisplayName":{ - "shape":"DisplayName", - "documentation":"

A display name of the collaboration creator.

" + "creatorDisplayName":{ + "shape":"DisplayName", + "documentation":"

A display name of the collaboration creator.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time when the collaboration was created.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The time the collaboration metadata was last updated.

" + }, + "memberStatus":{ + "shape":"MemberStatus", + "documentation":"

The status of a member in a collaboration.

" + }, + "membershipId":{ + "shape":"UUID", + "documentation":"

The unique ID for your membership within the collaboration.

" + }, + "membershipArn":{ + "shape":"MembershipArn", + "documentation":"

The unique ARN for your membership within the collaboration.

" + }, + "dataEncryptionMetadata":{ + "shape":"DataEncryptionMetadata", + "documentation":"

The settings for client-side encryption for cryptographic computing.

" + }, + "queryLogStatus":{ + "shape":"CollaborationQueryLogStatus", + "documentation":"

An indicator as to whether query logging has been enabled or disabled for the collaboration.

" + } + }, + "documentation":"

The multi-party data share environment. The collaboration contains metadata about its purpose and participants.

" + }, + "CollaborationAnalysisTemplate":{ + "type":"structure", + "required":[ + "id", + "arn", + "collaborationId", + "collaborationArn", + "creatorAccountId", + "name", + "createTime", + "updateTime", + "schema", + "format", + "source" + ], + "members":{ + "id":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"

The identifier of the analysis template.

" + }, + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"

The Amazon Resource Name (ARN) of the analysis template.

" + }, + "collaborationId":{ + "shape":"UUID", + "documentation":"

A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.

" + }, + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The unique ARN for the analysis template’s associated collaboration.

" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the analysis template.

" + }, + "creatorAccountId":{ + "shape":"AccountId", + "documentation":"

The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.

" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"

The name of the analysis template.

" + }, + "createTime":{ + "shape":"Timestamp", + "documentation":"

The time that the analysis template within a collaboration was created.

" + }, + "updateTime":{ + "shape":"Timestamp", + "documentation":"

The time that the analysis template in the collaboration was last updated.

" + }, + "schema":{ + "shape":"AnalysisSchema", + "documentation":"

The entire schema object.

" + }, + "format":{ + "shape":"AnalysisFormat", + "documentation":"

The format of the analysis template in the collaboration.

" + }, + "source":{ + "shape":"AnalysisSource", + "documentation":"

The source of the analysis template within a collaboration.

" + }, + "analysisParameters":{ + "shape":"AnalysisParameterList", + "documentation":"

The analysis parameters that have been specified in the analysis template.

" + } + }, + "documentation":"

The analysis template within a collaboration.

" + }, + "CollaborationAnalysisTemplateList":{ + "type":"list", + "member":{"shape":"CollaborationAnalysisTemplate"}, + "max":10, + "min":0 + }, + "CollaborationAnalysisTemplateSummary":{ + "type":"structure", + "required":[ + "arn", + "createTime", + "id", + "name", + "updateTime", + "collaborationArn", + "collaborationId", + "creatorAccountId" + ], + "members":{ + "arn":{ + "shape":"AnalysisTemplateArn", + "documentation":"

The Amazon Resource Name (ARN) of the analysis template.

" }, "createTime":{ "shape":"Timestamp", - "documentation":"

The time when the collaboration was created.

" + "documentation":"

The time that the summary of the analysis template in a collaboration was created.

" + }, + "id":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"

The identifier of the analysis template.

" + }, + "name":{ + "shape":"ResourceAlias", + "documentation":"

The name of the analysis template.

" }, "updateTime":{ "shape":"Timestamp", - "documentation":"

The time the collaboration metadata was last updated.

" + "documentation":"

The time that the summary of the analysis template in the collaboration was last updated.

" }, - "memberStatus":{ - "shape":"MemberStatus", - "documentation":"

The status of a member in a collaboration.

" + "collaborationArn":{ + "shape":"CollaborationArn", + "documentation":"

The unique ARN for the analysis template’s associated collaboration.

" }, - "membershipId":{ + "collaborationId":{ "shape":"UUID", - "documentation":"

The unique ID for your membership within the collaboration.

" - }, - "membershipArn":{ - "shape":"MembershipArn", - "documentation":"

The unique ARN for your membership within the collaboration.

" + "documentation":"

A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.

" }, - "dataEncryptionMetadata":{ - "shape":"DataEncryptionMetadata", - "documentation":"

The settings for client-side encryption for cryptographic computing.

" + "creatorAccountId":{ + "shape":"AccountId", + "documentation":"

The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.

" }, - "queryLogStatus":{ - "shape":"CollaborationQueryLogStatus", - "documentation":"

An indicator as to whether query logging has been enabled or disabled for the collaboration.

" + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the analysis template.

" } }, - "documentation":"

The multi-party data share environment. The collaboration contains metadata about its purpose and participants.

" + "documentation":"

The metadata of the analysis template within a collaboration.

" + }, + "CollaborationAnalysisTemplateSummaryList":{ + "type":"list", + "member":{"shape":"CollaborationAnalysisTemplateSummary"} }, "CollaborationArn":{ "type":"string", @@ -1270,7 +1866,7 @@ }, "analysisRuleTypes":{ "shape":"ConfiguredTableAnalysisRuleTypeList", - "documentation":"

The types of analysis rules associated with this configured table. Valid values are `AGGREGATION` and `LIST`. Currently, only one analysis rule may be associated with a configured table.

" + "documentation":"

The types of analysis rules associated with this configured table. Currently, only one analysis rule may be associated with a configured table.

" }, "analysisMethod":{ "shape":"AnalysisMethod", @@ -1308,7 +1904,7 @@ }, "type":{ "shape":"ConfiguredTableAnalysisRuleType", - "documentation":"

The type of configured table analysis rule. Valid values are `AGGREGATION` and `LIST`.

" + "documentation":"

The type of configured table analysis rule.

" }, "createTime":{ "shape":"Timestamp", @@ -1342,7 +1938,8 @@ "aggregation":{ "shape":"AnalysisRuleAggregation", "documentation":"

Analysis rule type that enables only aggregation queries on a configured table.

" - } + }, + "custom":{"shape":"AnalysisRuleCustom"} }, "documentation":"

Controls on the query specifications that can be run on a configured table.

", "union":true @@ -1351,7 +1948,8 @@ "type":"string", "enum":[ "AGGREGATION", - "LIST" + "LIST", + "CUSTOM" ] }, "ConfiguredTableAnalysisRuleTypeList":{ @@ -1362,7 +1960,7 @@ "type":"string", "max":100, "min":0, - "pattern":"arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredTable/[\\d\\w-]+" + "pattern":"arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredtable/[\\d\\w-]+" }, "ConfiguredTableAssociation":{ "type":"structure", @@ -1430,7 +2028,7 @@ "type":"string", "max":100, "min":0, - "pattern":"arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredTableAssociation/[\\d\\w-]+/[\\d\\w-]+" + "pattern":"arn:aws:[\\w]+:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:configuredtableassociation/[\\d\\w-]+/[\\d\\w-]+" }, "ConfiguredTableAssociationIdentifier":{ "type":"string", @@ -1578,6 +2176,57 @@ "INVALID_STATE" ] }, + "CreateAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "name", + "format", + "source" + ], + "members":{ + "description":{ + "shape":"ResourceDescription", + "documentation":"

The description of the analysis template.

" + }, + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The identifier for a membership resource.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "name":{ + "shape":"TableAlias", + "documentation":"

The name of the analysis template.

" + }, + "format":{ + "shape":"AnalysisFormat", + "documentation":"

The format of the analysis template.

" + }, + "source":{ + "shape":"AnalysisSource", + "documentation":"

The information in the analysis template. Currently supports text, the query text for the analysis template.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource.

" + }, + "analysisParameters":{ + "shape":"AnalysisParameterList", + "documentation":"

The parameters of the analysis template.

" + } + } + }, + "CreateAnalysisTemplateOutput":{ + "type":"structure", + "required":["analysisTemplate"], + "members":{ + "analysisTemplate":{ + "shape":"AnalysisTemplate", + "documentation":"

The analysis template.

" + } + } + }, "CreateCollaborationInput":{ "type":"structure", "required":[ @@ -1649,7 +2298,7 @@ }, "analysisRuleType":{ "shape":"ConfiguredTableAnalysisRuleType", - "documentation":"

The type of analysis rule. Valid values are AGGREGATION and LIST.

" + "documentation":"

The type of analysis rule.

" }, "analysisRulePolicy":{ "shape":"ConfiguredTableAnalysisRulePolicy", @@ -1818,6 +2467,32 @@ }, "documentation":"

The settings for client-side encryption for cryptographic computing.

" }, + "DeleteAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "analysisTemplateIdentifier" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The identifier for a membership resource.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "analysisTemplateIdentifier":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"

The identifier for the analysis template resource.

", + "location":"uri", + "locationName":"analysisTemplateIdentifier" + } + } + }, + "DeleteAnalysisTemplateOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteCollaborationInput":{ "type":"structure", "required":["collaborationIdentifier"], @@ -1966,6 +2641,68 @@ "ACTIVE" ] }, + "GetAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "analysisTemplateIdentifier" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The identifier for a membership resource.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "analysisTemplateIdentifier":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"

The identifier for the analysis template resource.

", + "location":"uri", + "locationName":"analysisTemplateIdentifier" + } + } + }, + "GetAnalysisTemplateOutput":{ + "type":"structure", + "required":["analysisTemplate"], + "members":{ + "analysisTemplate":{ + "shape":"AnalysisTemplate", + "documentation":"

The analysis template.

" + } + } + }, + "GetCollaborationAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "collaborationIdentifier", + "analysisTemplateArn" + ], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"

A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.

", + "location":"uri", + "locationName":"collaborationIdentifier" + }, + "analysisTemplateArn":{ + "shape":"AnalysisTemplateArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the analysis template within a collaboration.

", + "location":"uri", + "locationName":"analysisTemplateArn" + } + } + }, + "GetCollaborationAnalysisTemplateOutput":{ + "type":"structure", + "required":["collaborationAnalysisTemplate"], + "members":{ + "collaborationAnalysisTemplate":{ + "shape":"CollaborationAnalysisTemplate", + "documentation":"

The analysis template within a collaboration.

" + } + } + }, "GetCollaborationInput":{ "type":"structure", "required":["collaborationIdentifier"], @@ -2255,6 +2992,82 @@ "type":"string", "pattern":"[\\w!.*/-]*" }, + "ListAnalysisTemplatesInput":{ + "type":"structure", + "required":["membershipIdentifier"], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The identifier for a membership resource.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that are returned per call.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListAnalysisTemplatesOutput":{ + "type":"structure", + "required":["analysisTemplateSummaries"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + }, + "analysisTemplateSummaries":{ + "shape":"AnalysisTemplateSummaryList", + "documentation":"

Lists analysis template metadata.

" + } + } + }, + "ListCollaborationAnalysisTemplatesInput":{ + "type":"structure", + "required":["collaborationIdentifier"], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"

A unique identifier for the collaboration that the analysis templates belong to. Currently accepts collaboration ID.

", + "location":"uri", + "locationName":"collaborationIdentifier" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that are returned per call.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListCollaborationAnalysisTemplatesOutput":{ + "type":"structure", + "required":["collaborationAnalysisTemplateSummaries"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + }, + "collaborationAnalysisTemplateSummaries":{ + "shape":"CollaborationAnalysisTemplateSummaryList", + "documentation":"

The metadata of the analysis template within a collaboration.

" + } + } + }, "ListCollaborationsInput":{ "type":"structure", "members":{ @@ -2822,6 +3635,42 @@ "max":10240, "min":0 }, + "ParameterMap":{ + "type":"map", + "key":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValue"} + }, + "ParameterName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[0-9a-zA-Z_]+" + }, + "ParameterType":{ + "type":"string", + "enum":[ + "SMALLINT", + "INTEGER", + "BIGINT", + "DECIMAL", + "REAL", + "DOUBLE_PRECISION", + "BOOLEAN", + "CHAR", + "VARCHAR", + "DATE", + "TIMESTAMP", + "TIMESTAMPTZ", + "TIME", + "TIMETZ", + "VARBYTE" + ] + }, + "ParameterValue":{ + "type":"string", + "max":250, + "min":0 + }, "ProtectedQuery":{ "type":"structure", "required":[ @@ -2990,6 +3839,14 @@ "queryString":{ "shape":"ProtectedQuerySQLParametersQueryStringString", "documentation":"

The query string to be submitted.

" + }, + "analysisTemplateArn":{ + "shape":"AnalysisTemplateArn", + "documentation":"

The Amazon Resource Name (ARN) associated with the analysis template within a collaboration.

" + }, + "parameters":{ + "shape":"ParameterMap", + "documentation":"

The protected query SQL parameters.

" } }, "documentation":"

The parameters for the SQL type Protected Query.

", @@ -3063,6 +3920,22 @@ "type":"string", "enum":["SQL"] }, + "QueryTables":{ + "type":"list", + "member":{"shape":"TableAlias"} + }, + "ResourceAlias":{ + "type":"string", + "max":128, + "min":0, + "pattern":"[a-zA-Z0-9_](([a-zA-Z0-9_ ]+-)*([a-zA-Z0-9_ ]+))?" + }, + "ResourceDescription":{ + "type":"string", + "max":255, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t\\r\\n]*" + }, "ResourceNotFoundException":{ "type":"structure", "required":[ @@ -3158,7 +4031,7 @@ }, "analysisRuleTypes":{ "shape":"AnalysisRuleTypeList", - "documentation":"

The analysis rule types associated with the schema. Valued values are LIST and AGGREGATION. Currently, only one entry is present.

" + "documentation":"

The analysis rule types associated with the schema. Currently, only one entry is present.

" }, "analysisMethod":{ "shape":"AnalysisMethod", @@ -3454,6 +4327,41 @@ "members":{ } }, + "UpdateAnalysisTemplateInput":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "analysisTemplateIdentifier" + ], + "members":{ + "membershipIdentifier":{ + "shape":"MembershipIdentifier", + "documentation":"

The identifier for a membership resource.

", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "analysisTemplateIdentifier":{ + "shape":"AnalysisTemplateIdentifier", + "documentation":"

The identifier for the analysis template resource.

", + "location":"uri", + "locationName":"analysisTemplateIdentifier" + }, + "description":{ + "shape":"ResourceDescription", + "documentation":"

A new description for the analysis template.

" + } + } + }, + "UpdateAnalysisTemplateOutput":{ + "type":"structure", + "required":["analysisTemplate"], + "members":{ + "analysisTemplate":{ + "shape":"AnalysisTemplate", + "documentation":"

The analysis template.

" + } + } + }, "UpdateCollaborationInput":{ "type":"structure", "required":["collaborationIdentifier"], @@ -3695,7 +4603,8 @@ "enum":[ "FIELD_VALIDATION_FAILED", "INVALID_CONFIGURATION", - "INVALID_QUERY" + "INVALID_QUERY", + "IAM_SYNCHRONIZATION_DELAY" ] } }, diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index 71733ad3f0c..a777b374e60 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 cloud9 diff --git a/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json index 7f41719e6ef..fbaea21794b 100644 --- a/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - 
"argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] }, - "supportsFIPS" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloud9-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, { - 
"fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloud9-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloud9-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloud9.{Region}.{PartitionResult#dualStackDnsSuffix}", + 
"properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -222,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://cloud9-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cloud9.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -231,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloud9.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://cloud9.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/cloud9/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloud9/src/main/resources/codegen-resources/endpoint-tests.json index 247e617ca0f..b28ec45a62f 100644 --- a/services/cloud9/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloud9/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,1099 +1,533 @@ { "testCases": [ { - 
"documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - 
"endpoint": { - "url": "https://cloud9.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-central-1.api.aws" - 
} - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - 
"documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - 
"url": "https://cloud9-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - 
"UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.eu-west-2.amazonaws.com" + "url": "https://cloud9.af-south-1.amazonaws.com" } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "Region": "eu-west-2", "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.eu-west-1.api.aws" + "url": "https://cloud9.ap-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.eu-west-1.amazonaws.com" + "url": "https://cloud9.ap-northeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "eu-west-1", + "Region": "ap-northeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region 
ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.eu-west-1.api.aws" + "url": "https://cloud9.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.eu-west-1.amazonaws.com" + "url": "https://cloud9.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "eu-west-1", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-3.api.aws" + "url": "https://cloud9.ap-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-3.amazonaws.com" + "url": "https://cloud9.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", + "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-3.api.aws" + "url": "https://cloud9.ap-southeast-2.amazonaws.com" } 
}, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-3.amazonaws.com" + "url": "https://cloud9.ca-central-1.amazonaws.com" } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ap-northeast-3", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-2.api.aws" + "url": "https://cloud9.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-2.amazonaws.com" + "url": "https://cloud9.eu-north-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", + "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-2.api.aws" + "url": "https://cloud9.eu-south-1.amazonaws.com" } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region 
ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-2.amazonaws.com" + "url": "https://cloud9.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "Region": "ap-northeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-1.api.aws" + "url": "https://cloud9.eu-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-northeast-1.amazonaws.com" + "url": "https://cloud9.eu-west-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", + "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-northeast-1.api.aws" + "url": "https://cloud9.me-south-1.amazonaws.com" } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://cloud9.ap-northeast-1.amazonaws.com" + "url": "https://cloud9.sa-east-1.amazonaws.com" } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "Region": "ap-northeast-1", "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.me-south-1.api.aws" + "url": "https://cloud9.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.me-south-1.amazonaws.com" + "url": "https://cloud9.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "me-south-1", + "Region": "us-east-2", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.me-south-1.api.aws" + "url": "https://cloud9.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.me-south-1.amazonaws.com" + "url": "https://cloud9.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "me-south-1", "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with 
FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.sa-east-1.api.aws" + "url": "https://cloud9-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "sa-east-1", "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.sa-east-1.amazonaws.com" + "url": "https://cloud9-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "sa-east-1", "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9.sa-east-1.api.aws" + "url": "https://cloud9.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "sa-east-1", "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-east-1.api.aws" + "url": "https://cloud9-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": true } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS 
enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-east-1.amazonaws.com" + "url": "https://cloud9-fips.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-east-1.api.aws" + "url": "https://cloud9.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-east-1", "UseDualStack": true } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-east-1.amazonaws.com" + "url": "https://cloud9.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-southeast-1.api.aws" + "url": "https://cloud9-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-southeast-1", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-southeast-1.amazonaws.com" + "url": "https://cloud9-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - 
"documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-southeast-1.api.aws" + "url": "https://cloud9.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-southeast-1.amazonaws.com" + "url": "https://cloud9.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloud9-fips.ap-southeast-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "ap-southeast-2", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.ap-southeast-2.amazonaws.com" + "url": "https://cloud9-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": 
"https://cloud9.ap-southeast-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.ap-southeast-2.amazonaws.com" + "url": "https://cloud9.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9-fips.us-east-1.amazonaws.com" + "url": "https://cloud9-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloud9.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-1", 
"UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloud9.us-east-1.amazonaws.com" + "url": "https://cloud9.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloud9-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloud9.us-east-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://cloud9.us-east-2.amazonaws.com" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": false + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -1101,7 +535,6 @@ }, "params": { 
"UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1112,8 +545,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1124,11 +557,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cloud9/src/main/resources/codegen-resources/service-2.json b/services/cloud9/src/main/resources/codegen-resources/service-2.json index 7e9cca54ffb..2d2f489783d 100644 --- a/services/cloud9/src/main/resources/codegen-resources/service-2.json +++ b/services/cloud9/src/main/resources/codegen-resources/service-2.json @@ -330,7 +330,7 @@ }, "imageId":{ "shape":"ImageId", - "documentation":"

The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.

The default Amazon Linux AMI is currently used if the parameter isn't explicitly assigned a value in the request.

In the future the parameter for Amazon Linux will no longer be available when you specify an AMI for your instance. Amazon Linux 2 will then become the default AMI, which is used to launch your instance if no parameter is explicitly defined.

AMI aliases

  • Amazon Linux (default): amazonlinux-1-x86_64

  • Amazon Linux 2: amazonlinux-2-x86_64

  • Ubuntu 18.04: ubuntu-18.04-x86_64

SSM paths

  • Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64

  • Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64

  • Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64

" + "documentation":"

The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.

The default Amazon Linux AMI is currently used if the parameter isn't explicitly assigned a value in the request. Because Amazon Linux AMI has ended standard support as of December 31, 2020, we recommend you choose Amazon Linux 2, which includes long term support through 2023.

From December 31, 2023, the parameter for Amazon Linux will no longer be available when you specify an AMI for your instance. Amazon Linux 2 will then become the default AMI, which is used to launch your instance if no parameter is explicitly defined.

AMI aliases

  • Amazon Linux (default): amazonlinux-1-x86_64

  • Amazon Linux 2: amazonlinux-2-x86_64

  • Ubuntu 18.04: ubuntu-18.04-x86_64

SSM paths

  • Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64

  • Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64

  • Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64

" }, "automaticStopTimeMinutes":{ "shape":"AutomaticStopTimeMinutes", diff --git a/services/cloudcontrol/pom.xml b/services/cloudcontrol/pom.xml index b81b90ff714..c6f585ad8ac 100644 --- a/services/cloudcontrol/pom.xml +++ b/services/cloudcontrol/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudcontrol AWS Java SDK :: Services :: Cloud Control diff --git a/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-rule-set.json index e215956866a..1812986e28a 100644 --- a/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": 
"Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudcontrolapi-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true 
+ ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudcontrolapi-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudcontrolapi-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support 
DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://cloudcontrolapi-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://cloudcontrolapi.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-tests.json index ab39a6b29bf..d21e7072890 100644 --- a/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudcontrol/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,276 +1,133 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://cloudcontrolapi-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://cloudcontrolapi.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.eu-south-2.amazonaws.com" + "url": "https://cloudcontrolapi.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-south-2", - "UseFIPS": false + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-gov-east-1.api.aws" + "url": "https://cloudcontrolapi.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region 
us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-gov-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-gov-east-1.api.aws" + "url": "https://cloudcontrolapi.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-gov-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.me-central-1.api.aws" + "url": "https://cloudcontrolapi.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 
with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.me-central-1.amazonaws.com" + "url": "https://cloudcontrolapi.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.me-central-1.api.aws" + "url": "https://cloudcontrolapi.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.me-central-1.amazonaws.com" + "url": "https://cloudcontrolapi.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ca-central-1.api.aws" + "url": "https://cloudcontrolapi.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, "Region": "ca-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { @@ -281,74 +138,9 @@ } }, "params": { - "UseDualStack": false, - "Region": 
"ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "UseFIPS": true, + "UseDualStack": false } }, { @@ -359,1114 +151,495 @@ } }, "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - 
"expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://cloudcontrolapi-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://cloudcontrolapi.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-2.api.aws" - } - }, - 
"params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": 
false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-1.api.aws" - } 
- }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.me-south-1.amazonaws.com" - } - 
}, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.sa-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-east-1.api.aws" + "url": "https://cloudcontrolapi.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS 
enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-east-1.api.aws" + "url": "https://cloudcontrolapi.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-east-1.amazonaws.com" + "url": "https://cloudcontrolapi.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://cloudcontrolapi.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 
with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.cn-north-1.amazonaws.com.cn" + "url": "https://cloudcontrolapi.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://cloudcontrolapi.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.cn-north-1.amazonaws.com.cn" + "url": "https://cloudcontrolapi-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-gov-west-1.api.aws" + "url": "https://cloudcontrolapi.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", 
"expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-gov-west-1.amazonaws.com" + "url": "https://cloudcontrolapi-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-gov-west-1.api.aws" + "url": "https://cloudcontrolapi.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-gov-west-1.amazonaws.com" + "url": "https://cloudcontrolapi-fips.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-1.api.aws" + "url": "https://cloudcontrolapi.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://cloudcontrolapi-fips.ap-southeast-1.amazonaws.com" + "url": "https://cloudcontrolapi-fips.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-1.api.aws" + "url": "https://cloudcontrolapi-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-1.amazonaws.com" + "url": "https://cloudcontrolapi.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-2.api.aws" + "url": "https://cloudcontrolapi.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://cloudcontrolapi-fips.ap-southeast-2.amazonaws.com" + "url": "https://cloudcontrolapi.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-2.api.aws" + "url": "https://cloudcontrolapi-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-2.amazonaws.com" + "url": "https://cloudcontrolapi-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-3.api.aws" + "url": "https://cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://cloudcontrolapi-fips.ap-southeast-3.amazonaws.com" + "url": "https://cloudcontrolapi.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-3.api.aws" + "url": "https://cloudcontrolapi-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.ap-southeast-3.amazonaws.com" + "url": "https://cloudcontrolapi.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-east-1.api.aws" + "url": "https://cloudcontrolapi-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { 
- "url": "https://cloudcontrolapi-fips.us-east-1.amazonaws.com" + "url": "https://cloudcontrolapi-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-east-1.api.aws" + "url": "https://cloudcontrolapi.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.us-east-2.api.aws" + "url": "https://cloudcontrolapi-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": 
"https://cloudcontrolapi-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.us-east-2.api.aws" + "url": "https://cloudcontrolapi.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://cloudcontrolapi.us-east-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://cloudcontrolapi-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack 
enabled", "expect": { - "endpoint": { - "url": "https://cloudcontrolapi-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://cloudcontrolapi.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://cloudcontrolapi.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1476,9 +649,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" 
} }, @@ -1488,11 +661,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/cloudcontrol/src/main/resources/codegen-resources/service-2.json b/services/cloudcontrol/src/main/resources/codegen-resources/service-2.json index b59888f2c05..9e34fe76313 100644 --- a/services/cloudcontrol/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudcontrol/src/main/resources/codegen-resources/service-2.json @@ -286,7 +286,7 @@ }, "DesiredState":{ "shape":"Properties", - "documentation":"

Structured data format representing the desired state of the resource, consisting of that resource's properties and their desired values.

Cloud Control API currently supports JSON as a structured data format.

 <p>Specify the desired state as one of the following:</p> <ul> <li> <p>A JSON blob</p> </li> <li> <p>A local path containing the desired state in JSON data format</p> </li> </ul> <p>For more information, see <a href="https://docs.aws.amazon.com/cloudcontrolapi/latest/userguide/resource-operations-create.html#resource-operations-create-desiredstate">Composing the desired state of the resource</a> in the <i>Amazon Web Services Cloud Control API User Guide</i>.</p> <p>For more information about the properties of a specific resource, refer to the related topic for the resource in the <a href="https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html">Resource and property types reference</a> in the <i>CloudFormation Users Guide</i>.</p> 
" + "documentation":"

Structured data format representing the desired state of the resource, consisting of that resource's properties and their desired values.

Cloud Control API currently supports JSON as a structured data format.

Specify the desired state as one of the following:

  • A JSON blob

  • A local path containing the desired state in JSON data format

For more information, see Composing the desired state of the resource in the Amazon Web Services Cloud Control API User Guide.

For more information about the properties of a specific resource, refer to the related topic for the resource in the Resource and property types reference in the CloudFormation User Guide.

" } } }, diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index 25d4c2988dd..595227af12e 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index ffd5ecfb0f9..3b4121c4d65 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudformation/src/main/resources/codegen-resources/endpoint-rule-set.json index 9cbca10387c..226c1811ddf 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudformation/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" 
+ } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudformation-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://cloudformation-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" 
+ } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://cloudformation.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://cloudformation-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://cloudformation.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://cloudformation-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - 
{ - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudformation.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://cloudformation.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cloudformation.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://cloudformation.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index 9a7cd0ce304..7fe1b649322 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -709,6 +709,24 @@ }, "documentation":"

Lists all stacks that are importing an exported output value. To modify or remove an exported output value, first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports.

For more information about importing an exported output value, see the Fn::ImportValue function.

" }, + "ListStackInstanceResourceDrifts":{ + "name":"ListStackInstanceResourceDrifts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStackInstanceResourceDriftsInput"}, + "output":{ + "shape":"ListStackInstanceResourceDriftsOutput", + "resultWrapper":"ListStackInstanceResourceDriftsResult" + }, + "errors":[ + {"shape":"StackSetNotFoundException"}, + {"shape":"StackInstanceNotFoundException"}, + {"shape":"OperationNotFoundException"} + ], + "documentation":"

Returns drift information for resources in a stack instance.

ListStackInstanceResourceDrifts returns drift information for the most recent drift detection operation. If an operation is in progress, it may only return partial results.

" + }, "ListStackInstances":{ "name":"ListStackInstances", "http":{ @@ -1335,7 +1353,10 @@ "CFNRegistryException":{ "type":"structure", "members":{ - "Message":{"shape":"ErrorMessage"} + "Message":{ + "shape":"ErrorMessage", + "documentation":"

A message with details about the error that occurred.

" + } }, "documentation":"

An error occurred during a CloudFormation registry operation.

", "error":{ @@ -1700,7 +1721,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM resources in CloudFormation templates.

  • CAPABILITY_AUTO_EXPAND

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

    This capacity doesn't apply to creating change sets, and specifying it when creating change sets has no effect.

    If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.

    For more information about macros, see Using CloudFormation macros to perform custom processing on templates.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM resources in CloudFormation templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

    This capability doesn't apply to creating change sets, and specifying it when creating change sets has no effect.

    If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.

    For more information about macros, see Using CloudFormation macros to perform custom processing on templates.

" }, "ResourceTypes":{ "shape":"ResourceTypes", @@ -1805,7 +1826,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

    If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.

    You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

    For more information, see Using CloudFormation macros to perform custom processing on templates.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

    If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.

    You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

    For more information, see Using CloudFormation macros to perform custom processing on templates.

" }, "ResourceTypes":{ "shape":"ResourceTypes", @@ -1837,7 +1858,11 @@ }, "EnableTerminationProtection":{ "shape":"EnableTerminationProtection", - "documentation":"

Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default.

For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.

" + "documentation":"

Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default.

For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.

" + }, + "RetainExceptOnCreate":{ + "shape":"RetainExceptOnCreate", + "documentation":"

This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.

" } }, "documentation":"

The input for CreateStack action.

" @@ -1933,7 +1958,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

    Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

    Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

" }, "Tags":{ "shape":"Tags", @@ -1941,7 +1966,7 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.

" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", @@ -3052,6 +3077,10 @@ "DisableRollback":{ "shape":"DisableRollback", "documentation":"

Preserves the state of previously provisioned resources when an operation fails. This parameter can't be specified when the OnStackFailure parameter to the CreateChangeSet API operation was specified.

  • True - if the stack creation fails, do nothing. This is equivalent to specifying DO_NOTHING for the OnStackFailure parameter to the CreateChangeSet API operation.

  • False - if the stack creation fails, roll back the stack. This is equivalent to specifying ROLLBACK for the OnStackFailure parameter to the CreateChangeSet API operation.

Default: True

" + }, + "RetainExceptOnCreate":{ + "shape":"RetainExceptOnCreate", + "documentation":"

This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.

" } }, "documentation":"

The input for the ExecuteChangeSet action.

" @@ -3541,6 +3570,62 @@ } } }, + "ListStackInstanceResourceDriftsInput":{ + "type":"structure", + "required":[ + "StackSetName", + "StackInstanceAccount", + "StackInstanceRegion", + "OperationId" + ], + "members":{ + "StackSetName":{ + "shape":"StackSetNameOrId", + "documentation":"

The name or unique ID of the stack set that you want to list drifted resources for.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + }, + "StackInstanceResourceDriftStatuses":{ + "shape":"StackResourceDriftStatusFilters", + "documentation":"

The resource drift status of the stack instance.

  • DELETED: The resource differs from its expected template configuration in that the resource has been deleted.

  • MODIFIED: One or more resource properties differ from their expected template values.

  • IN_SYNC: The resource's actual configuration matches its expected template configuration.

  • NOT_CHECKED: CloudFormation doesn't currently return this value.

" + }, + "StackInstanceAccount":{ + "shape":"Account", + "documentation":"

The name of the Amazon Web Services account that you want to list resource drifts for.

" + }, + "StackInstanceRegion":{ + "shape":"Region", + "documentation":"

The name of the Region where you want to list resource drifts.

" + }, + "OperationId":{ + "shape":"ClientRequestToken", + "documentation":"

The unique ID of the drift operation.

" + }, + "CallAs":{ + "shape":"CallAs", + "documentation":"

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.

" + } + } + }, + "ListStackInstanceResourceDriftsOutput":{ + "type":"structure", + "members":{ + "Summaries":{ + "shape":"StackInstanceResourceDriftsSummaries", + "documentation":"

A list of StackInstanceResourceDriftSummary structures that contain information about the specified stack instances.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

" + } + } + }, "ListStackInstancesInput":{ "type":"structure", "required":["StackSetName"], @@ -4789,6 +4874,7 @@ "type":"list", "member":{"shape":"ResourceToSkip"} }, + "RetainExceptOnCreate":{"type":"boolean"}, "RetainResources":{ "type":"list", "member":{"shape":"LogicalResourceId"} @@ -4819,7 +4905,7 @@ "documentation":"

The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.

The default is 0 minutes.

If you specify a monitoring period but don't specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack, for example) as necessary.

If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.

" } }, - "documentation":"

Structure containing the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.

" + "documentation":"

Structure containing the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.

" }, "RollbackStackInput":{ "type":"structure", @@ -4836,6 +4922,10 @@ "ClientRequestToken":{ "shape":"ClientRequestToken", "documentation":"

A unique identifier for this RollbackStack request.

" + }, + "RetainExceptOnCreate":{ + "shape":"RetainExceptOnCreate", + "documentation":"

This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.

" } } }, @@ -5076,19 +5166,23 @@ }, "EnableTerminationProtection":{ "shape":"EnableTerminationProtection", - "documentation":"

Whether termination protection is enabled for the stack.

For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide.

" + "documentation":"

Whether termination protection is enabled for the stack.

For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide.

" }, "ParentId":{ "shape":"StackId", - "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" + "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" }, "RootId":{ "shape":"StackId", - "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" + "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" }, "DriftInformation":{ "shape":"StackDriftInformation", - "documentation":"

Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + "documentation":"

Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + }, + "RetainExceptOnCreate":{ + "shape":"RetainExceptOnCreate", + "documentation":"

This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.

" } }, "documentation":"

The Stack data type.

" @@ -5331,7 +5425,8 @@ "type":"string", "enum":[ "DETAILED_STATUS", - "LAST_OPERATION_ID" + "LAST_OPERATION_ID", + "DRIFT_STATUS" ] }, "StackInstanceFilterValues":{ @@ -5343,7 +5438,7 @@ "StackInstanceFilters":{ "type":"list", "member":{"shape":"StackInstanceFilter"}, - "max":2 + "max":3 }, "StackInstanceNotFoundException":{ "type":"structure", @@ -5357,6 +5452,55 @@ }, "exception":true }, + "StackInstanceResourceDriftsSummaries":{ + "type":"list", + "member":{"shape":"StackInstanceResourceDriftsSummary"} + }, + "StackInstanceResourceDriftsSummary":{ + "type":"structure", + "required":[ + "StackId", + "LogicalResourceId", + "ResourceType", + "StackResourceDriftStatus", + "Timestamp" + ], + "members":{ + "StackId":{ + "shape":"StackId", + "documentation":"

The ID of the stack instance.

" + }, + "LogicalResourceId":{ + "shape":"LogicalResourceId", + "documentation":"

The logical name of the resource specified in the template.

" + }, + "PhysicalResourceId":{ + "shape":"PhysicalResourceId", + "documentation":"

The name or unique identifier that corresponds to a physical instance ID of a resource supported by CloudFormation.

" + }, + "PhysicalResourceIdContext":{ + "shape":"PhysicalResourceIdContext", + "documentation":"

Context information that enables CloudFormation to uniquely identify a resource. CloudFormation uses context key-value pairs in cases where a resource's logical and physical IDs aren't enough to uniquely identify that resource. Each context key-value pair specifies a unique resource that contains the targeted resource.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

Type of resource. For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.

" + }, + "PropertyDifferences":{ + "shape":"PropertyDifferences", + "documentation":"

Status of the actual configuration of the resource compared to its expected configuration. These will be present only for resources whose StackInstanceResourceDriftStatus is MODIFIED.

" + }, + "StackResourceDriftStatus":{ + "shape":"StackResourceDriftStatus", + "documentation":"

The drift status of the resource in a stack instance.

  • DELETED: The resource differs from its expected template configuration in that the resource has been deleted.

  • MODIFIED: One or more resource properties differ from their expected template values.

  • IN_SYNC: The resource's actual configuration matches its expected template configuration.

  • NOT_CHECKED: CloudFormation doesn't currently return this value.

" + }, + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

Time at which the stack instance drift detection operation was initiated.

" + } + }, + "documentation":"

The structure containing summary information about resource drifts for a stack instance.

" + }, "StackInstanceStatus":{ "type":"string", "enum":[ @@ -5504,7 +5648,7 @@ }, "DriftInformation":{ "shape":"StackResourceDriftInformation", - "documentation":"

Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + "documentation":"

Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" }, "ModuleInfo":{ "shape":"ModuleInfo", @@ -5564,7 +5708,7 @@ }, "DriftInformation":{ "shape":"StackResourceDriftInformation", - "documentation":"

Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + "documentation":"

Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" }, "ModuleInfo":{ "shape":"ModuleInfo", @@ -5628,7 +5772,7 @@ "documentation":"

Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

" } }, - "documentation":"

Contains the drift information for a resource that has been checked for drift. This includes actual and expected property values for resources in which CloudFormation has detected drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

" + "documentation":"

Contains the drift information for a resource that has been checked for drift. This includes actual and expected property values for resources in which CloudFormation has detected drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

" }, "StackResourceDriftInformation":{ "type":"structure", @@ -5718,7 +5862,7 @@ }, "DriftInformation":{ "shape":"StackResourceDriftInformationSummary", - "documentation":"

Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + "documentation":"

Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" }, "ModuleInfo":{ "shape":"ModuleInfo", @@ -5772,7 +5916,7 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role used to create or update the stack set.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role used to create or update the stack set.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide.

" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", @@ -5921,7 +6065,7 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role used to perform this stack set operation.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the CloudFormation User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role used to perform this stack set operation.

Use customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Define Permissions for Multiple Administrators in the CloudFormation User Guide.

" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", @@ -6233,15 +6377,15 @@ }, "ParentId":{ "shape":"StackId", - "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" + "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" }, "RootId":{ "shape":"StackId", - "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" + "documentation":"

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" }, "DriftInformation":{ "shape":"StackDriftInformationSummary", - "documentation":"

Summarizes information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + "documentation":"

Summarizes information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" } }, "documentation":"

The StackSummary Data Type

" @@ -6780,7 +6924,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

    If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.

    You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

    For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

    If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.

    You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.

    Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

    For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

" }, "ResourceTypes":{ "shape":"ResourceTypes", @@ -6817,6 +6961,10 @@ "ClientRequestToken":{ "shape":"ClientRequestToken", "documentation":"

A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that CloudFormation successfully received them.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" + }, + "RetainExceptOnCreate":{ + "shape":"RetainExceptOnCreate", + "documentation":"

This deletion policy deletes newly created resources, but retains existing resources, when a stack operation is rolled back. This ensures new, empty, and unused resources are deleted, while critical resources and their data are retained. RetainExceptOnCreate can be specified for any resource that supports the DeletionPolicy attribute.

" } }, "documentation":"

The input for an UpdateStack action.

" @@ -6912,7 +7060,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks sets, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

    Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

" + "documentation":"

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.

  • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

    The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    • If you have IAM resources, you can specify either capability.

    • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

    • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

    If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

    For more information, see Acknowledging IAM Resources in CloudFormation Templates.

  • CAPABILITY_AUTO_EXPAND

    Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

    Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

" }, "Tags":{ "shape":"Tags", @@ -6924,7 +7072,7 @@ }, "AdministrationRoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to use to update this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack Set Operations in the CloudFormation User Guide.

If you specified a customized administrator role when you created the stack set, you must specify a customized administrator role, even if it is the same customized administrator role used with this stack set previously.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to use to update this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack Set Operations in the CloudFormation User Guide.

If you specified a customized administrator role when you created the stack set, you must specify a customized administrator role, even if it is the same customized administrator role used with this stack set previously.

" }, "ExecutionRoleName":{ "shape":"ExecutionRoleName", diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index be91e940a2c..18d3adcb72b 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudfront/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudfront/src/main/resources/codegen-resources/endpoint-rule-set.json index 2d908ad292a..07a287ff6fe 100644 --- a/services/cloudfront/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudfront/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -138,216 +138,91 @@ }, "aws" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + false + ] + } + ], + "endpoint": { + "url": "https://cloudfront.amazonaws.com", + "properties": { + "authSchemes": [ + 
{ + "name": "sigv4", + "signingName": "cloudfront", + "signingRegion": "us-east-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "ref": "UseFIPS" + "ref": "PartitionResult" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } + "name" ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } + "aws" ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } + true ] }, { - "conditions": [], - "endpoint": { - "url": "https://cloudfront.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "us-east-1" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, - 
"headers": {} - }, - "type": "endpoint" + false + ] } - ] + ], + "endpoint": { + "url": "https://cloudfront-fips.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "cloudfront", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -365,208 +240,40 @@ }, "aws-cn" ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront-fips.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront-fips.{Region}.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseFIPS" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does 
not support FIPS", - "type": "error" - } + false ] }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "ref": "UseDualStack" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + false + ] + } + ], + "endpoint": { + "url": "https://cloudfront.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "cloudfront", + "signingRegion": "cn-northwest-1" } ] }, - { - "conditions": [], - "endpoint": { - "url": "https://cloudfront.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] + "headers": {} + }, + "type": "endpoint" }, { "conditions": [ @@ -688,33 +395,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://cloudfront-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { @@ -797,60 +477,6 @@ "conditions": [], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - 
"aws-global" - ] - } - ], - "endpoint": { - "url": "https://cloudfront.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - ], - "endpoint": { - "url": "https://cloudfront.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "cloudfront", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, { "conditions": [], "endpoint": { diff --git a/services/cloudfront/src/main/resources/codegen-resources/endpoint-tests.json b/services/cloudfront/src/main/resources/codegen-resources/endpoint-tests.json index 736cf87722f..0f37f3195b0 100644 --- a/services/cloudfront/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/cloudfront/src/main/resources/codegen-resources/endpoint-tests.json @@ -18,8 +18,8 @@ }, "params": { "Region": "aws-global", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -40,8 +40,8 @@ }, "params": { "Region": "aws-global", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -53,8 +53,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -75,8 +75,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -88,8 +88,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -110,8 +110,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -132,8 +132,8 @@ }, "params": { 
"Region": "aws-cn-global", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -145,8 +145,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -158,8 +158,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -171,8 +171,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -193,8 +193,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -206,8 +206,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -219,8 +219,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -232,8 +232,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -245,8 +245,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -258,8 +269,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + 
"params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -271,8 +293,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -284,8 +317,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -297,8 +341,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -310,8 +354,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -323,8 +367,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -335,8 +379,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -347,10 +391,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git 
a/services/cloudfront/src/main/resources/codegen-resources/service-2.json b/services/cloudfront/src/main/resources/codegen-resources/service-2.json index 1ea9d33492c..e2300702ae6 100644 --- a/services/cloudfront/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudfront/src/main/resources/codegen-resources/service-2.json @@ -97,6 +97,7 @@ {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"}, {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, + {"shape":"TooManyDistributionsAssociatedToOriginAccessControl"}, {"shape":"NoSuchResponseHeadersPolicy"}, {"shape":"TooManyDistributionsAssociatedToResponseHeadersPolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, @@ -107,7 +108,7 @@ {"shape":"NoSuchRealtimeLogConfig"}, {"shape":"RealtimeLogConfigOwnerMismatch"} ], - "documentation":"

Creates a staging distribution using the configuration of the provided primary distribution. A staging distribution is a copy of an existing distribution (called the primary distribution) that you can use in a continuous deployment workflow.

After you create a staging distribution, you can use UpdateDistribution to modify the staging distribution's configuration. Then you can use CreateContinuousDeploymentPolicy to incrementally move traffic to the staging distribution.

" + "documentation":"

Creates a staging distribution using the configuration of the provided primary distribution. A staging distribution is a copy of an existing distribution (called the primary distribution) that you can use in a continuous deployment workflow.

After you create a staging distribution, you can use UpdateDistribution to modify the staging distribution's configuration. Then you can use CreateContinuousDeploymentPolicy to incrementally move traffic to the staging distribution.

This API operation requires the following IAM permissions:

" }, "CreateCachePolicy":{ "name":"CreateCachePolicy2020_05_31", @@ -260,6 +261,7 @@ {"shape":"InvalidOrigin"}, {"shape":"InvalidOriginAccessIdentity"}, {"shape":"InvalidOriginAccessControl"}, + {"shape":"IllegalOriginAccessConfiguration"}, {"shape":"AccessDenied"}, {"shape":"TooManyTrustedSigners"}, {"shape":"TrustedSignerDoesNotExist"}, @@ -307,6 +309,7 @@ {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"}, {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, + {"shape":"TooManyDistributionsAssociatedToOriginAccessControl"}, {"shape":"NoSuchResponseHeadersPolicy"}, {"shape":"TooManyDistributionsAssociatedToResponseHeadersPolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, @@ -320,7 +323,7 @@ {"shape":"NoSuchContinuousDeploymentPolicy"}, {"shape":"InvalidDomainNameForOriginAccessControl"} ], - "documentation":"

Create a new distribution with tags.

" + "documentation":"

Create a new distribution with tags. This API operation requires the following IAM permissions:

" }, "CreateFieldLevelEncryptionConfig":{ "name":"CreateFieldLevelEncryptionConfig2020_05_31", @@ -1780,6 +1783,7 @@ {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"}, {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, + {"shape":"TooManyDistributionsAssociatedToOriginAccessControl"}, {"shape":"NoSuchResponseHeadersPolicy"}, {"shape":"TooManyDistributionsAssociatedToResponseHeadersPolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, @@ -1857,6 +1861,7 @@ {"shape":"TooManyDistributionsAssociatedToFieldLevelEncryptionConfig"}, {"shape":"NoSuchCachePolicy"}, {"shape":"TooManyDistributionsAssociatedToCachePolicy"}, + {"shape":"TooManyDistributionsAssociatedToOriginAccessControl"}, {"shape":"NoSuchResponseHeadersPolicy"}, {"shape":"TooManyDistributionsAssociatedToResponseHeadersPolicy"}, {"shape":"NoSuchOriginRequestPolicy"}, @@ -1867,7 +1872,7 @@ {"shape":"NoSuchRealtimeLogConfig"}, {"shape":"RealtimeLogConfigOwnerMismatch"} ], - "documentation":"

Copies the staging distribution's configuration to its corresponding primary distribution. The primary distribution retains its Aliases (also known as alternate domain names or CNAMEs) and ContinuousDeploymentPolicyId value, but otherwise its configuration is overwritten to match the staging distribution.

You can use this operation in a continuous deployment workflow after you have tested configuration changes on the staging distribution. After using a continuous deployment policy to move a portion of your domain name's traffic to the staging distribution and verifying that it works as intended, you can use this operation to copy the staging distribution's configuration to the primary distribution. This action will disable the continuous deployment policy and move your domain's traffic back to the primary distribution.

" + "documentation":"

Copies the staging distribution's configuration to its corresponding primary distribution. The primary distribution retains its Aliases (also known as alternate domain names or CNAMEs) and ContinuousDeploymentPolicyId value, but otherwise its configuration is overwritten to match the staging distribution.

You can use this operation in a continuous deployment workflow after you have tested configuration changes on the staging distribution. After using a continuous deployment policy to move a portion of your domain name's traffic to the staging distribution and verifying that it works as intended, you can use this operation to copy the staging distribution's configuration to the primary distribution. This action will disable the continuous deployment policy and move your domain's traffic back to the primary distribution.

This API operation requires the following IAM permissions:

" }, "UpdateFieldLevelEncryptionConfig":{ "name":"UpdateFieldLevelEncryptionConfig2020_05_31", @@ -2155,7 +2160,7 @@ "members":{ "Enabled":{ "shape":"boolean", - "documentation":"

This field is true if any of the Amazon Web Services accounts in the list have active CloudFront key pairs that CloudFront can use to verify the signatures of signed URLs and signed cookies. If not, this field is false.

" + "documentation":"

This field is true if any of the Amazon Web Services accounts in the list are configured as trusted signers. If not, this field is false.

" }, "Quantity":{ "shape":"integer", @@ -3058,6 +3063,10 @@ "CallerReference":{ "shape":"string", "documentation":"

A value that uniquely identifies a request to create a resource. This helps to prevent CloudFront from creating a duplicate resource if you accidentally resubmit an identical request.

" + }, + "Enabled":{ + "shape":"boolean", + "documentation":"

A Boolean flag to specify the state of the staging distribution when it's created. When you set this value to True, the staging distribution is enabled. When you set this value to False, the staging distribution is disabled.

If you omit this field, the default value is True.

" } } }, @@ -5072,7 +5081,7 @@ }, "Runtime":{ "shape":"FunctionRuntime", - "documentation":"

The function's runtime environment. The only valid value is cloudfront-js-1.0.

" + "documentation":"

The function's runtime environment version.

" } }, "documentation":"

Contains configuration information about a CloudFront function.

" @@ -5156,7 +5165,10 @@ }, "FunctionRuntime":{ "type":"string", - "enum":["cloudfront-js-1.0"] + "enum":[ + "cloudfront-js-1.0", + "cloudfront-js-2.0" + ] }, "FunctionSizeLimitExceeded":{ "type":"structure", @@ -10342,7 +10354,7 @@ "members":{ "Enabled":{ "shape":"boolean", - "documentation":"

This field is true if any of the Amazon Web Services accounts have public keys that CloudFront can use to verify the signatures of signed URLs and signed cookies. If not, this field is false.

" + "documentation":"

This field is true if any of the Amazon Web Services accounts in the list are configured as trusted signers. If not, this field is false.

" }, "Quantity":{ "shape":"integer", diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index e8a9e266f6a..994b7e24db3 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 771b52c4a3d..14b47fac8ee 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index 0e429b3fa88..3935738f819 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index d4e5c9f8494..95d98190035 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index 29db3954d6a..b1c5cfaaab3 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudtrail/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudtrail/src/main/resources/codegen-resources/endpoint-rule-set.json index 2b5ffaa7a8c..be85a60fb63 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/endpoint-rule-set.json +++ 
b/services/cloudtrail/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": 
"booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cloudtrail-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://cloudtrail-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,168 +225,128 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "Region" + }, + "us-gov-east-1" ] } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://cloudtrail.us-gov-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-east-1" - ] - } - ], - "endpoint": { - "url": 
"https://cloudtrail.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + "fn": "stringEquals", + "argv": [ { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-west-1" - ] - } - ], - "endpoint": { - "url": "https://cloudtrail.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "ref": "Region" }, - { - "conditions": [], - "endpoint": { - "url": "https://cloudtrail-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "us-gov-west-1" ] } - ] + ], + "endpoint": { + "url": "https://cloudtrail.us-gov-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://cloudtrail-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cloudtrail.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { 
- "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://cloudtrail.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cloudtrail.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://cloudtrail.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json index 9ded7eeb1e5..60f77595e8a 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json @@ -1136,7 +1136,7 @@ "members":{ "Field":{ "shape":"SelectorField", - "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the field is used only for selecting events as filtering is not supported.

For CloudTrail event records, supported fields include readOnly, eventCategory, eventSource (for management events), eventName, resources.type, and resources.ARN.

For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the only supported field is eventCategory.

  • readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events.

  • eventSource - For filtering management events only. This can be set only to NotEquals kms.amazonaws.com.

  • eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas.

  • eventCategory - This is required and must be set to Equals.

    • For CloudTrail event records, the value must be Management or Data.

    • For Config configuration items, the value must be ConfigurationItem.

    • For Audit Manager evidence, the value must be Evidence.

    • For non-Amazon Web Services events, the value must be ActivityAuditLog.

  • resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following:

    • AWS::DynamoDB::Table

    • AWS::Lambda::Function

    • AWS::S3::Object

    • AWS::CloudTrail::Channel

    • AWS::CodeWhisperer::Profile

    • AWS::Cognito::IdentityPool

    • AWS::DynamoDB::Stream

    • AWS::EC2::Snapshot

    • AWS::EMRWAL::Workspace

    • AWS::FinSpace::Environment

    • AWS::Glue::Table

    • AWS::GuardDuty::Detector

    • AWS::KendraRanking::ExecutionPlan

    • AWS::ManagedBlockchain::Node

    • AWS::SageMaker::ExperimentTrialComponent

    • AWS::SageMaker::FeatureGroup

    • AWS::S3::AccessPoint

    • AWS::S3ObjectLambda::AccessPoint

    • AWS::S3Outposts::Object

    You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector.

  • resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value.

    The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.

    • arn:<partition>:s3:::<bucket_name>/

    • arn:<partition>:s3:::<bucket_name>/<object_path>/

    When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>

    When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>

    When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>

    When resources.type equals AWS::CodeWhisperer::Profile, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:codewhisperer:<region>:<account_ID>:profile/<profile_ID>

    When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cognito-identity:<region>:<account_ID>:identitypool/<identity_pool_ID>

    When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>

    When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>

    When resources.type equals AWS::EMRWAL::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:emrwal:<region>::workspace/<workspace_name>

    When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>

    When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>

    When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:guardduty:<region>:<account_ID>:detector/<detector_ID>

    When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:kendra-ranking:<region>:<account_ID>:rescore-execution-plan/<rescore_execution_plan_ID>

    When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>

    When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>

    When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>

    When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators.

    • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>

    • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>

    When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>

    When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>

" + "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the field is used only for selecting events as filtering is not supported.

For CloudTrail event records, supported fields include readOnly, eventCategory, eventSource (for management events), eventName, resources.type, and resources.ARN.

For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the only supported field is eventCategory.

  • readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events.

  • eventSource - For filtering management events only. This can be set only to NotEquals kms.amazonaws.com.

  • eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas.

  • eventCategory - This is required and must be set to Equals.

    • For CloudTrail event records, the value must be Management or Data.

    • For Config configuration items, the value must be ConfigurationItem.

    • For Audit Manager evidence, the value must be Evidence.

    • For non-Amazon Web Services events, the value must be ActivityAuditLog.

  • resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following:

    • AWS::DynamoDB::Table

    • AWS::Lambda::Function

    • AWS::S3::Object

    • AWS::CloudTrail::Channel

    • AWS::CodeWhisperer::Profile

    • AWS::Cognito::IdentityPool

    • AWS::DynamoDB::Stream

    • AWS::EC2::Snapshot

    • AWS::EMRWAL::Workspace

    • AWS::FinSpace::Environment

    • AWS::Glue::Table

    • AWS::GuardDuty::Detector

    • AWS::KendraRanking::ExecutionPlan

    • AWS::ManagedBlockchain::Network

    • AWS::ManagedBlockchain::Node

    • AWS::MedicalImaging::Datastore

    • AWS::SageMaker::ExperimentTrialComponent

    • AWS::SageMaker::FeatureGroup

    • AWS::S3::AccessPoint

    • AWS::S3ObjectLambda::AccessPoint

    • AWS::S3Outposts::Object

    • AWS::SSMMessages::ControlChannel

    • AWS::VerifiedPermissions::PolicyStore

    You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector.

  • resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value.

    The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information.

    • arn:<partition>:s3:::<bucket_name>/

    • arn:<partition>:s3:::<bucket_name>/<object_path>/

    When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>

    When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:lambda:<region>:<account_ID>:function:<function_name>

    When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cloudtrail:<region>:<account_ID>:channel/<channel_UUID>

    When resources.type equals AWS::CodeWhisperer::Profile, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:codewhisperer:<region>:<account_ID>:profile/<profile_ID>

    When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:cognito-identity:<region>:<account_ID>:identitypool/<identity_pool_ID>

    When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:dynamodb:<region>:<account_ID>:table/<table_name>/stream/<date_time>

    When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:ec2:<region>::snapshot/<snapshot_ID>

    When resources.type equals AWS::EMRWAL::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:emrwal:<region>::workspace/<workspace_name>

    When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:finspace:<region>:<account_ID>:environment/<environment_ID>

    When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:glue:<region>:<account_ID>:table/<database_name>/<table_name>

    When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:guardduty:<region>:<account_ID>:detector/<detector_ID>

    When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:kendra-ranking:<region>:<account_ID>:rescore-execution-plan/<rescore_execution_plan_ID>

    When resources.type equals AWS::ManagedBlockchain::Network, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:managedblockchain:::networks/<network_name>

    When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:managedblockchain:<region>:<account_ID>:nodes/<node_ID>

    When resources.type equals AWS::MedicalImaging::Datastore, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:medical-imaging:<region>:<account_ID>:datastore/<data_store_ID>

    When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:experiment-trial-component/<experiment_trial_component_name>

    When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:sagemaker:<region>:<account_ID>:feature-group/<feature_group_name>

    When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators.

    • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>

    • arn:<partition>:s3:<region>:<account_ID>:accesspoint/<access_point_name>/object/<object_path>

    When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:s3-object-lambda:<region>:<account_ID>:accesspoint/<access_point_name>

    When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:s3-outposts:<region>:<account_ID>:<object_path>

    When resources.type equals AWS::SSMMessages::ControlChannel, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:ssmmessages:<region>:<account_ID>:control-channel/<channel_ID>

    When resources.type equals AWS::VerifiedPermissions::PolicyStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format:

    • arn:<partition>:verifiedpermissions:<region>:<account_ID>:policy-store/<policy_store_UUID>

" }, "Equals":{ "shape":"Operator", @@ -1379,7 +1379,7 @@ }, "RetentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

The retention period of the event data store, in days. You can set a retention period of up to 2557 days, the equivalent of seven years.

" + "documentation":"

The retention period of the event data store, in days. You can set a retention period of up to 2557 days, the equivalent of seven years. CloudTrail Lake determines whether to retain an event by checking if the eventTime of the event is within the specified retention period. For example, if you set a retention period of 90 days, CloudTrail will remove events when the eventTime is older than 90 days.

If you plan to copy trail events to this event data store, we recommend that you consider both the age of the events that you want to copy as well as how long you want to keep the copied events in your event data store. For example, if you copy trail events that are 5 years old and specify a retention period of 7 years, the event data store will retain those events for two years.

" }, "TerminationProtectionEnabled":{ "shape":"TerminationProtectionEnabled", @@ -1565,7 +1565,7 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

The resource type in which you want to log data events. You can specify the following basic event selector resource types:

  • AWS::DynamoDB::Table

  • AWS::Lambda::Function

  • AWS::S3::Object

The following resource types are also available through advanced event selectors. Basic event selector resource types are valid in advanced event selectors, but advanced event selector resource types are not valid in basic event selectors. For more information, see AdvancedFieldSelector.

  • AWS::CloudTrail::Channel

  • AWS::CodeWhisperer::Profile

  • AWS::Cognito::IdentityPool

  • AWS::DynamoDB::Stream

  • AWS::EC2::Snapshot

  • AWS::EMRWAL::Workspace

  • AWS::FinSpace::Environment

  • AWS::Glue::Table

  • AWS::GuardDuty::Detector

  • AWS::KendraRanking::ExecutionPlan

  • AWS::ManagedBlockchain::Node

  • AWS::SageMaker::ExperimentTrialComponent

  • AWS::SageMaker::FeatureGroup

  • AWS::S3::AccessPoint

  • AWS::S3ObjectLambda::AccessPoint

  • AWS::S3Outposts::Object

" + "documentation":"

The resource type in which you want to log data events. You can specify the following basic event selector resource types:

  • AWS::DynamoDB::Table

  • AWS::Lambda::Function

  • AWS::S3::Object

The following resource types are also available through advanced event selectors. Basic event selector resource types are valid in advanced event selectors, but advanced event selector resource types are not valid in basic event selectors. For more information, see AdvancedFieldSelector.

  • AWS::CloudTrail::Channel

  • AWS::CodeWhisperer::Profile

  • AWS::Cognito::IdentityPool

  • AWS::DynamoDB::Stream

  • AWS::EC2::Snapshot

  • AWS::EMRWAL::Workspace

  • AWS::FinSpace::Environment

  • AWS::Glue::Table

  • AWS::GuardDuty::Detector

  • AWS::KendraRanking::ExecutionPlan

  • AWS::ManagedBlockchain::Network

  • AWS::ManagedBlockchain::Node

  • AWS::MedicalImaging::Datastore

  • AWS::SageMaker::ExperimentTrialComponent

  • AWS::SageMaker::FeatureGroup

  • AWS::S3::AccessPoint

  • AWS::S3ObjectLambda::AccessPoint

  • AWS::S3Outposts::Object

  • AWS::SSMMessages::ControlChannel

  • AWS::VerifiedPermissions::PolicyStore

" }, "Values":{ "shape":"DataResourceValues", @@ -3182,7 +3182,7 @@ "type":"structure", "members":{ }, - "documentation":"

You are already running the maximum number of concurrent queries. Wait a minute for some queries to finish, and then run the query again.

", + "documentation":"

You are already running the maximum number of concurrent queries. The maximum number of concurrent queries is 10. Wait a minute for some queries to finish, and then run the query again.

", "exception":true }, "MaxQueryResults":{ @@ -4215,7 +4215,7 @@ }, "RetentionPeriod":{ "shape":"RetentionPeriod", - "documentation":"

The retention period, in days.

" + "documentation":"

The retention period of the event data store, in days. You can set a retention period of up to 2557 days, the equivalent of seven years. CloudTrail Lake determines whether to retain an event by checking if the eventTime of the event is within the specified retention period. For example, if you set a retention period of 90 days, CloudTrail will remove events when the eventTime is older than 90 days.

If you decrease the retention period of an event data store, CloudTrail will remove any events with an eventTime older than the new retention period. For example, if the previous retention period was 365 days and you decrease it to 100 days, CloudTrail will remove events with an eventTime older than 100 days.

" }, "TerminationProtectionEnabled":{ "shape":"TerminationProtectionEnabled", diff --git a/services/cloudtraildata/pom.xml b/services/cloudtraildata/pom.xml index 09a6cd03ae1..43c6e00ade4 100644 --- a/services/cloudtraildata/pom.xml +++ b/services/cloudtraildata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudtraildata AWS Java SDK :: Services :: Cloud Trail Data diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index baf61c5345d..c482b63caf9 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index 1e749dfdbed..f17f1a9e29a 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index 39d0873af09..ffc4d44d20a 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index 81ed0390b37..23cf96e5b2b 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 2bf613c4413..23c6afa5bc1 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codecatalyst/pom.xml b/services/codecatalyst/pom.xml index 73d09be84b0..e6c1b53be73 100644 --- a/services/codecatalyst/pom.xml +++ b/services/codecatalyst/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codecatalyst AWS Java SDK :: Services :: Code Catalyst diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index b42801ad7df..de0becc06b9 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index 61192637978..f04590778f6 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index d53a2dfcd02..96148f9a33a 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index e887c30bbdf..9ee676d8e62 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurusecurity/pom.xml b/services/codegurusecurity/pom.xml index ef969a3b7e9..11e32ba49d7 100644 --- a/services/codegurusecurity/pom.xml +++ b/services/codegurusecurity/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codegurusecurity AWS Java SDK :: Services :: Code Guru Security diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 8331b0a32f1..7cd9396534c 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index d632c675e84..59c442de302 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codestar AWS Java SDK :: Services :: AWS CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index d6b97f7bd5d..651cc62b507 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarconnections/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/codestarconnections/src/main/resources/codegen-resources/endpoint-rule-set.json index 698d9be7ea1..54683182617 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/codestarconnections/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - 
"assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": 
"booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codestar-connections-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codestar-connections-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://codestar-connections-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": 
"booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codestar-connections.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://codestar-connections-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://codestar-connections.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://codestar-connections.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support 
DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://codestar-connections.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/codestarconnections/src/main/resources/codegen-resources/endpoint-tests.json b/services/codestarconnections/src/main/resources/codegen-resources/endpoint-tests.json index 5c75a579525..c384373745c 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/codestarconnections/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,29 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-south-1.amazonaws.com" + "url": "https://codestar-connections.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-south-1.api.aws" + "url": "https://codestar-connections.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + 
"UseDualStack": false } }, { @@ -47,48 +34,35 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ca-central-1.amazonaws.com" + "url": "https://codestar-connections.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ca-central-1.api.aws" + "url": "https://codestar-connections.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -99,48 +73,9 @@ } }, "params": { - "UseDualStack": false, "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - 
"endpoint": { - "url": "https://codestar-connections-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -151,152 +86,9 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": 
"us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -307,100 +99,22 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - 
"endpoint": { - "url": "https://codestar-connections-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.eu-west-2.api.aws" + "url": "https://codestar-connections.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - 
"Region": "eu-west-2", - "UseFIPS": false + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -411,438 +125,352 @@ } }, "params": { - "UseDualStack": false, "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.eu-west-1.api.aws" + "url": "https://codestar-connections.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.eu-west-1.amazonaws.com" + "url": "https://codestar-connections.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://codestar-connections-fips.ap-northeast-2.api.aws" + "url": "https://codestar-connections.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-northeast-2.amazonaws.com" + "url": "https://codestar-connections.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-northeast-2.api.aws" + "url": "https://codestar-connections.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-northeast-2.amazonaws.com" + "url": "https://codestar-connections.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { 
"endpoint": { - "url": "https://codestar-connections-fips.ap-northeast-1.api.aws" + "url": "https://codestar-connections-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-northeast-1.amazonaws.com" + "url": "https://codestar-connections-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-northeast-1.api.aws" + "url": "https://codestar-connections.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-northeast-1.amazonaws.com" + "url": "https://codestar-connections-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://codestar-connections-fips.sa-east-1.api.aws" + "url": "https://codestar-connections-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.sa-east-1.amazonaws.com" + "url": "https://codestar-connections.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.sa-east-1.api.aws" + "url": "https://codestar-connections.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections.sa-east-1.amazonaws.com" + "url": "https://codestar-connections-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - 
"url": "https://codestar-connections-fips.ap-southeast-1.api.aws" + "url": "https://codestar-connections-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-southeast-1.amazonaws.com" + "url": "https://codestar-connections.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-southeast-1.api.aws" + "url": "https://codestar-connections.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codestar-connections.ap-southeast-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with 
FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.ap-southeast-2.api.aws" + "url": "https://codestar-connections-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.ap-southeast-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.ap-southeast-2.api.aws" + "url": "https://codestar-connections.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codestar-connections.ap-southeast-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS 
enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections-fips.us-east-1.api.aws" + "url": "https://codestar-connections-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-east-1.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.us-east-1.api.aws" + "url": "https://codestar-connections.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://codestar-connections.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://codestar-connections-fips.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections-fips.us-east-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-east-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codestar-connections.us-east-2.amazonaws.com" - } - }, - "params": { + "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -852,9 +480,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -864,11 +492,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": 
"Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/codestarconnections/src/main/resources/codegen-resources/service-2.json b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json index a1316735c54..6a5ab07ced5 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/service-2.json +++ b/services/codestarconnections/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceUnavailableException"} ], - "documentation":"

Creates a connection that can then be given to other AWS services like CodePipeline so that it can access third-party code repositories. The connection is in pending status until the third-party connection handshake is completed from the console.

" + "documentation":"

Creates a connection that can then be given to other Amazon Web Services services like CodePipeline so that it can access third-party code repositories. The connection is in pending status until the third-party connection handshake is completed from the console.

" }, "CreateHost":{ "name":"CreateHost", @@ -104,6 +104,9 @@ }, "input":{"shape":"ListConnectionsInput"}, "output":{"shape":"ListConnectionsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], "documentation":"

Lists the connections associated with your account.

" }, "ListHosts":{ @@ -154,7 +157,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes tags from an AWS resource.

" + "documentation":"

Removes tags from an Amazon Web Services resource.

" }, "UpdateHost":{ "name":"UpdateHost", @@ -199,11 +202,11 @@ "members":{ "ConnectionName":{ "shape":"ConnectionName", - "documentation":"

The name of the connection. Connection names must be unique in an AWS user account.

" + "documentation":"

The name of the connection. Connection names must be unique in an Amazon Web Services account.

" }, "ConnectionArn":{ "shape":"ConnectionArn", - "documentation":"

The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between AWS services.

The ARN is never reused if the connection is deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between Amazon Web Services.

The ARN is never reused if the connection is deleted.

" }, "ProviderType":{ "shape":"ProviderType", @@ -222,7 +225,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the host associated with the connection.

" } }, - "documentation":"

A resource that is used to connect third-party source providers with services like AWS CodePipeline.

Note: A connection created through CloudFormation, the CLI, or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by updating the connection in the console.

" + "documentation":"

A resource that is used to connect third-party source providers with services like CodePipeline.

Note: A connection created through CloudFormation, the CLI, or the SDK is in `PENDING` status by default. You can make its status `AVAILABLE` by updating the connection in the console.

" }, "ConnectionArn":{ "type":"string", @@ -258,7 +261,7 @@ }, "ConnectionName":{ "shape":"ConnectionName", - "documentation":"

The name of the connection to be created. The name must be unique in the calling AWS account.

" + "documentation":"

The name of the connection to be created.

" }, "Tags":{ "shape":"TagList", @@ -276,7 +279,7 @@ "members":{ "ConnectionArn":{ "shape":"ConnectionArn", - "documentation":"

The Amazon Resource Name (ARN) of the connection to be created. The ARN is used as the connection reference when the connection is shared between AWS services.

The ARN is never reused if the connection is deleted.

" + "documentation":"

The Amazon Resource Name (ARN) of the connection to be created. The ARN is used as the connection reference when the connection is shared between Amazon Web Services services.

The ARN is never reused if the connection is deleted.

" }, "Tags":{ "shape":"TagList", @@ -294,7 +297,7 @@ "members":{ "Name":{ "shape":"HostName", - "documentation":"

The name of the host to be created. The name must be unique in the calling AWS account.

" + "documentation":"

The name of the host to be created.

" }, "ProviderType":{ "shape":"ProviderType", @@ -569,7 +572,8 @@ "enum":[ "Bitbucket", "GitHub", - "GitHubEnterpriseServer" + "GitHubEnterpriseServer", + "GitLab" ] }, "ResourceNotFoundException":{ @@ -628,7 +632,7 @@ "documentation":"

The tag's value.

" } }, - "documentation":"

A tag is a key-value pair that is used to manage the resource.

This tag is available for use by AWS services that support tags.

" + "documentation":"

A tag is a key-value pair that is used to manage the resource.

This tag is available for use by Amazon Web Services services that support tags.

" }, "TagKey":{ "type":"string", @@ -775,5 +779,5 @@ "pattern":"vpc-\\w{8}(\\w{9})?" } }, - "documentation":"AWS CodeStar Connections

This AWS CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Connections API. You can use the connections API to work with connections and installations.

Connections are configurations that you use to connect AWS resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.

When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket app. When you create a connection, you can choose an existing installation or create one.

When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections.

You can work with connections by calling:

  • CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.

  • DeleteConnection, which deletes the specified connection.

  • GetConnection, which returns information about the connection, including the connection status.

  • ListConnections, which lists the connections associated with your account.

You can work with hosts by calling:

  • CreateHost, which creates a host that represents the infrastructure where your provider is installed.

  • DeleteHost, which deletes the specified host.

  • GetHost, which returns information about the host, including the setup status.

  • ListHosts, which lists the hosts associated with your account.

You can work with tags in AWS CodeStar Connections by calling the following:

  • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeStar Connections.

  • TagResource, which adds or updates tags for a resource in AWS CodeStar Connections.

  • UntagResource, which removes tags for a resource in AWS CodeStar Connections.

For information about how to use AWS CodeStar Connections, see the Developer Tools User Guide.

" + "documentation":"AWS CodeStar Connections

This Amazon Web Services CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the Amazon Web Services CodeStar Connections API. You can use the connections API to work with connections and installations.

Connections are configurations that you use to connect Amazon Web Services resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection.

When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket app. When you create a connection, you can choose an existing installation or create one.

When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections.

You can work with connections by calling:

  • CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline.

  • DeleteConnection, which deletes the specified connection.

  • GetConnection, which returns information about the connection, including the connection status.

  • ListConnections, which lists the connections associated with your account.

You can work with hosts by calling:

  • CreateHost, which creates a host that represents the infrastructure where your provider is installed.

  • DeleteHost, which deletes the specified host.

  • GetHost, which returns information about the host, including the setup status.

  • ListHosts, which lists the hosts associated with your account.

You can work with tags in Amazon Web Services CodeStar Connections by calling the following:

  • ListTagsForResource, which gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in Amazon Web Services CodeStar Connections.

  • TagResource, which adds or updates tags for a resource in Amazon Web Services CodeStar Connections.

  • UntagResource, which removes tags for a resource in Amazon Web Services CodeStar Connections.

For information about how to use Amazon Web Services CodeStar Connections, see the Developer Tools User Guide.

" } diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index 04757cbf041..facbf562765 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index edbe626806a..da7f4390ea1 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index 79fbd205d58..79d41a01b63 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json index 0426da05beb..e2356443cd4 100644 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"UserImportInProgressException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Adds additional user attributes to the user pool schema.

" + "documentation":"

Adds additional user attributes to the user pool schema.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminAddUserToGroup":{ "name":"AdminAddUserToGroup", @@ -45,7 +45,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Adds the specified user to the specified group.

Calling this action requires developer credentials.

" + "documentation":"

Adds the specified user to the specified group.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminConfirmSignUp":{ "name":"AdminConfirmSignUp", @@ -68,7 +68,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Confirms user registration as an admin without using a confirmation code. Works on any user.

Calling this action requires developer credentials.

" + "documentation":"

Confirms user registration as an admin without using a confirmation code. Works on any user.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminCreateUser":{ "name":"AdminCreateUser", @@ -96,7 +96,7 @@ {"shape":"UnsupportedUserStateException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new user in the specified user pool.

If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS).

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password.

Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email.

In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password.

AdminCreateUser requires developer credentials.

" + "documentation":"

Creates a new user in the specified user pool.

If MessageAction isn't set, the default is to send a welcome message via email or phone (SMS).

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

This message is based on a template that you configured in your call to create or update a user pool. This template includes your custom sign-up instructions and placeholders for user name and temporary password.

Alternatively, you can call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't send any email.

In either case, the user will be in the FORCE_CHANGE_PASSWORD state until they sign in and change their password.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminDeleteUser":{ "name":"AdminDeleteUser", @@ -113,7 +113,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes a user as an administrator. Works on any user.

Calling this action requires developer credentials.

" + "documentation":"

Deletes a user as an administrator. Works on any user.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminDeleteUserAttributes":{ "name":"AdminDeleteUserAttributes", @@ -131,7 +131,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes the user attributes in a user pool as an administrator. Works on any user.

Calling this action requires developer credentials.

" + "documentation":"

Deletes the user attributes in a user pool as an administrator. Works on any user.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminDisableProviderForUser":{ "name":"AdminDisableProviderForUser", @@ -150,7 +150,7 @@ {"shape":"AliasExistsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Prevents the user from signing in with the specified external (SAML or social) identity provider (IdP). If the user that you want to deactivate is a Amazon Cognito user pools native username + password user, they can't use their password to sign in. If the user to deactivate is a linked external IdP user, any link between that user and an existing user is removed. When the external user signs in again, and the user is no longer attached to the previously linked DestinationUser, the user must create a new user account. See AdminLinkProviderForUser.

This action is enabled only for admin access and requires developer credentials.

The ProviderName must match the value specified when creating an IdP for the pool.

To deactivate a native username + password user, the ProviderName value must be Cognito and the ProviderAttributeName must be Cognito_Subject. The ProviderAttributeValue must be the name that is used in the user pool for the user.

The ProviderAttributeName must always be Cognito_Subject for social IdPs. The ProviderAttributeValue must always be the exact subject that was used when the user was originally linked as a source user.

For de-linking a SAML identity, there are two scenarios. If the linked identity has not yet been used to sign in, the ProviderAttributeName and ProviderAttributeValue must be the same values that were used for the SourceUser when the identities were originally linked using AdminLinkProviderForUser call. (If the linking was done with ProviderAttributeName set to Cognito_Subject, the same applies here). However, if the user has already signed in, the ProviderAttributeName must be Cognito_Subject and ProviderAttributeValue must be the subject of the SAML assertion.

" + "documentation":"

Prevents the user from signing in with the specified external (SAML or social) identity provider (IdP). If the user that you want to deactivate is an Amazon Cognito user pools native username + password user, they can't use their password to sign in. If the user to deactivate is a linked external IdP user, any link between that user and an existing user is removed. When the external user signs in again, and the user is no longer attached to the previously linked DestinationUser, the user must create a new user account. See AdminLinkProviderForUser.

The ProviderName must match the value specified when creating an IdP for the pool.

To deactivate a native username + password user, the ProviderName value must be Cognito and the ProviderAttributeName must be Cognito_Subject. The ProviderAttributeValue must be the name that is used in the user pool for the user.

The ProviderAttributeName must always be Cognito_Subject for social IdPs. The ProviderAttributeValue must always be the exact subject that was used when the user was originally linked as a source user.

For de-linking a SAML identity, there are two scenarios. If the linked identity has not yet been used to sign in, the ProviderAttributeName and ProviderAttributeValue must be the same values that were used for the SourceUser when the identities were originally linked using AdminLinkProviderForUser call. (If the linking was done with ProviderAttributeName set to Cognito_Subject, the same applies here). However, if the user has already signed in, the ProviderAttributeName must be Cognito_Subject and ProviderAttributeValue must be the subject of the SAML assertion.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminDisableUser":{ "name":"AdminDisableUser", @@ -168,7 +168,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deactivates a user and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to GetUser and ListUsers API requests.

You must make this API request with Amazon Web Services credentials that have cognito-idp:AdminDisableUser permissions.

" + "documentation":"

Deactivates a user and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to GetUser and ListUsers API requests.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminEnableUser":{ "name":"AdminEnableUser", @@ -186,7 +186,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Enables the specified user as an administrator. Works on any user.

Calling this action requires developer credentials.

" + "documentation":"

Enables the specified user as an administrator. Works on any user.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminForgetDevice":{ "name":"AdminForgetDevice", @@ -204,7 +204,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Forgets the device, as an administrator.

Calling this action requires developer credentials.

" + "documentation":"

Forgets the device, as an administrator.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminGetDevice":{ "name":"AdminGetDevice", @@ -222,7 +222,7 @@ {"shape":"InternalErrorException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Gets the device, as an administrator.

Calling this action requires developer credentials.

" + "documentation":"

Gets the device, as an administrator.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminGetUser":{ "name":"AdminGetUser", @@ -240,7 +240,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Gets the specified user by user name in a user pool as an administrator. Works on any user.

Calling this action requires developer credentials.

" + "documentation":"

Gets the specified user by user name in a user pool as an administrator. Works on any user.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminInitiateAuth":{ "name":"AdminInitiateAuth", @@ -267,7 +267,7 @@ {"shape":"UserNotFoundException"}, {"shape":"UserNotConfirmedException"} ], - "documentation":"

Initiates the authentication flow, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" + "documentation":"

Initiates the authentication flow, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminLinkProviderForUser":{ "name":"AdminLinkProviderForUser", @@ -287,7 +287,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Links an existing user account in a user pool (DestinationUser) to an identity from an external IdP (SourceUser) based on a specified attribute name and value from the external IdP. This allows you to create a link from the existing user account to an external federated user identity that has not yet been used to sign in. You can then use the federated user identity to sign in as the existing user account.

For example, if there is an existing user with a username and password, this API links that user to a federated user identity. When the user signs in with a federated user identity, they sign in as the existing user account.

The maximum number of federated identities linked to a user is five.

Because this API allows a user with an external federated identity to sign in as an existing user in the user pool, it is critical that it only be used with external IdPs and provider attributes that have been trusted by the application owner.

This action is administrative and requires developer credentials.

" + "documentation":"

Links an existing user account in a user pool (DestinationUser) to an identity from an external IdP (SourceUser) based on a specified attribute name and value from the external IdP. This allows you to create a link from the existing user account to an external federated user identity that has not yet been used to sign in. You can then use the federated user identity to sign in as the existing user account.

For example, if there is an existing user with a username and password, this API links that user to a federated user identity. When the user signs in with a federated user identity, they sign in as the existing user account.

The maximum number of federated identities linked to a user is five.

Because this API allows a user with an external federated identity to sign in as an existing user in the user pool, it is critical that it only be used with external IdPs and provider attributes that have been trusted by the application owner.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminListDevices":{ "name":"AdminListDevices", @@ -305,7 +305,7 @@ {"shape":"InternalErrorException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Lists devices, as an administrator.

Calling this action requires developer credentials.

" + "documentation":"

Lists devices, as an administrator.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminListGroupsForUser":{ "name":"AdminListGroupsForUser", @@ -323,7 +323,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the groups that the user belongs to.

Calling this action requires developer credentials.

" + "documentation":"

Lists the groups that the user belongs to.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminListUserAuthEvents":{ "name":"AdminListUserAuthEvents", @@ -342,7 +342,7 @@ {"shape":"UserPoolAddOnNotEnabledException"}, {"shape":"InternalErrorException"} ], - "documentation":"

A history of user activity and any risks detected as part of Amazon Cognito advanced security.

" + "documentation":"

A history of user activity and any risks detected as part of Amazon Cognito advanced security.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminRemoveUserFromGroup":{ "name":"AdminRemoveUserFromGroup", @@ -359,7 +359,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Removes the specified user from the specified group.

Calling this action requires developer credentials.

" + "documentation":"

Removes the specified user from the specified group.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminResetUserPassword":{ "name":"AdminResetUserPassword", @@ -384,7 +384,7 @@ {"shape":"InvalidSmsRoleTrustRelationshipException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Resets the specified user's password in a user pool as an administrator. Works on any user.

When a developer calls this API, the current password is invalidated, so it must be changed. If a user tries to sign in after the API is called, the app will get a PasswordResetRequiredException exception back and should direct the user down the flow to reset the password, which is the same as the forgot password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" + "documentation":"

Resets the specified user's password in a user pool as an administrator. Works on any user.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminRespondToAuthChallenge":{ "name":"AdminRespondToAuthChallenge", @@ -416,7 +416,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"SoftwareTokenMFANotFoundException"} ], - "documentation":"

Responds to an authentication challenge, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" + "documentation":"

Responds to an authentication challenge, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminSetUserMFAPreference":{ "name":"AdminSetUserMFAPreference", @@ -435,7 +435,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

The user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in.

" + "documentation":"

The user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminSetUserPassword":{ "name":"AdminSetUserPassword", @@ -454,7 +454,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InvalidPasswordException"} ], - "documentation":"

Sets the specified user's password in a user pool as an administrator. Works on any user.

The password can be temporary or permanent. If it is temporary, the user status enters the FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it expires, the user won't be able to sign in, and an administrator must reset their password.

Once the user has set a new password, or the password is permanent, the user status is set to Confirmed.

" + "documentation":"

Sets the specified user's password in a user pool as an administrator. Works on any user.

The password can be temporary or permanent. If it is temporary, the user status enters the FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it expires, the user won't be able to sign in, and an administrator must reset their password.

Once the user has set a new password, or the password is permanent, the user status is set to Confirmed.

AdminSetUserPassword can set a password for the user profile that Amazon Cognito creates for third-party federated users. When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in this state can sign in as a federated user, and initiate authentication flows in the API like a linked native user. They can also modify their password and attributes in token-authenticated API requests like ChangePassword and UpdateUserAttributes. As a best security practice and to keep users in sync with your external IdP, don't set passwords on federated user profiles. To set up a federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user profile.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminSetUserSettings":{ "name":"AdminSetUserSettings", @@ -471,7 +471,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use AdminSetUserMFAPreference instead.

" + "documentation":"

This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use AdminSetUserMFAPreference instead.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminUpdateAuthEventFeedback":{ "name":"AdminUpdateAuthEventFeedback", @@ -490,7 +490,7 @@ {"shape":"UserPoolAddOnNotEnabledException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

" + "documentation":"

Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminUpdateDeviceStatus":{ "name":"AdminUpdateDeviceStatus", @@ -509,7 +509,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Updates the device status as an administrator.

Calling this action requires developer credentials.

" + "documentation":"

Updates the device status as an administrator.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminUpdateUserAttributes":{ "name":"AdminUpdateUserAttributes", @@ -534,7 +534,7 @@ {"shape":"InvalidEmailRoleAccessPolicyException"}, {"shape":"InvalidSmsRoleTrustRelationshipException"} ], - "documentation":"

Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user.

For custom attributes, you must prepend the custom: prefix to the attribute name.

In addition to updating user attributes, this API can also be used to mark phone and email as verified.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Calling this action requires developer credentials.

" + "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user.

For custom attributes, you must prepend the custom: prefix to the attribute name.

In addition to updating user attributes, this API can also be used to mark phone and email as verified.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminUserGlobalSignOut":{ "name":"AdminUserGlobalSignOut", @@ -552,7 +552,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Signs out a user from all devices. You must sign AdminUserGlobalSignOut requests with Amazon Web Services credentials. It also invalidates all refresh tokens that Amazon Cognito has issued to a user. The user's current access and ID tokens remain valid until they expire. By default, access and ID tokens expire one hour after they're issued. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the cookie validity period of 1 hour.

Calling this action requires developer credentials.

" + "documentation":"

Signs out a user from all devices. AdminUserGlobalSignOut invalidates all identity, access and refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.

Your app isn't aware that a user's access token is revoked unless it attempts to authorize a user pools API request with an access token that contains the scope aws.cognito.signin.user.admin. Your app might otherwise accept access tokens until they expire.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AssociateSoftwareToken":{ "name":"AssociateSoftwareToken", @@ -571,7 +571,7 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

" + "documentation":"

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "ChangePassword":{ "name":"ChangePassword", @@ -594,7 +594,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Changes the password for a specified user in a user pool.

", + "documentation":"

Changes the password for a specified user in a user pool.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", "authtype":"none" }, "ConfirmDevice":{ @@ -620,7 +620,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Confirms tracking of the device. This API call is the call that begins device tracking.

" + "documentation":"

Confirms tracking of the device. This API call is the call that begins device tracking.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "ConfirmForgotPassword":{ "name":"ConfirmForgotPassword", @@ -648,7 +648,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Allows a user to enter a confirmation code to reset a forgotten password.

", + "documentation":"

Allows a user to enter a confirmation code to reset a forgotten password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", "authtype":"none" }, "ConfirmSignUp":{ @@ -676,7 +676,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Confirms registration of a new user.

", + "documentation":"

Confirms registration of a new user.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", "authtype":"none" }, "CreateGroup":{ @@ -696,7 +696,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new group in the specified user pool.

Calling this action requires developer credentials.

" + "documentation":"

Creates a new group in the specified user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateIdentityProvider":{ "name":"CreateIdentityProvider", @@ -715,7 +715,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates an IdP for a user pool.

" + "documentation":"

Creates an IdP for a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateResourceServer":{ "name":"CreateResourceServer", @@ -733,7 +733,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new OAuth2.0 resource server and defines custom scopes within it.

" + "documentation":"

Creates a new OAuth2.0 resource server and defines custom scopes within it.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateUserImportJob":{ "name":"CreateUserImportJob", @@ -752,7 +752,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates the user import job.

" + "documentation":"

Creates a user import job.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateUserPool":{ "name":"CreateUserPool", @@ -773,7 +773,7 @@ {"shape":"UserPoolTaggingException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new Amazon Cognito user pool and sets the password policy for the pool.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

" + "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Creates a new Amazon Cognito user pool and sets the password policy for the pool.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateUserPoolClient":{ "name":"CreateUserPoolClient", @@ -793,7 +793,7 @@ {"shape":"InvalidOAuthFlowException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates the user pool client.

When you create a new user pool client, token revocation is automatically activated. For more information about revoking tokens, see RevokeToken.

" + "documentation":"

Creates the user pool client.

When you create a new user pool client, token revocation is automatically activated. For more information about revoking tokens, see RevokeToken.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateUserPoolDomain":{ "name":"CreateUserPoolDomain", @@ -810,7 +810,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new domain for a user pool.

" + "documentation":"

Creates a new domain for a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DeleteGroup":{ "name":"DeleteGroup", @@ -880,7 +880,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Allows a user to delete himself or herself.

", + "documentation":"

Allows a user to delete their own user profile.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", "authtype":"none" }, "DeleteUserAttributes":{ @@ -902,7 +902,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Deletes the attributes for a user.

", + "documentation":"

Deletes the attributes for a user.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", "authtype":"none" }, "DeleteUserPool":{ @@ -1040,7 +1040,7 @@ {"shape":"UserPoolTaggingException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Returns the configuration information and metadata of the specified user pool.

" + "documentation":"

Returns the configuration information and metadata of the specified user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DescribeUserPoolClient":{ "name":"DescribeUserPoolClient", @@ -1057,7 +1057,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Client method for returning the configuration information and metadata of the specified user pool app client.

" + "documentation":"

Client method for returning the configuration information and metadata of the specified user pool app client.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DescribeUserPoolDomain":{ "name":"DescribeUserPoolDomain", @@ -1094,7 +1094,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Forgets the specified device.

" + "documentation":"

Forgets the specified device.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "ForgotPassword":{ "name":"ForgotPassword", @@ -1121,7 +1121,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. If neither a verified phone number nor a verified email exists, an InvalidParameterException is thrown. To use the confirmation code for resetting the password, call ConfirmForgotPassword.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.

If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "GetCSVHeader":{ @@ -1161,7 +1161,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Gets the device.

" + "documentation":"

Gets the device.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "GetGroup":{ "name":"GetGroup", @@ -1197,6 +1197,23 @@ ], "documentation":"

Gets the specified IdP.

" }, + "GetLogDeliveryConfiguration":{ + "name":"GetLogDeliveryConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLogDeliveryConfigurationRequest"}, + "output":{"shape":"GetLogDeliveryConfigurationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets the detailed activity logging configuration for a user pool.

" + }, "GetSigningCertificate":{ "name":"GetSigningCertificate", "http":{ @@ -1248,7 +1265,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Gets the user attributes and metadata for a user.

", + "documentation":"

Gets the user attributes and metadata for a user.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", "authtype":"none" }, "GetUserAttributeVerificationCode":{ @@ -1278,7 +1295,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "GetUserPoolMfaConfig":{ @@ -1316,7 +1333,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Signs out users from all devices. It also invalidates all refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.

" + "documentation":"

Signs out a user from all devices. GlobalSignOut invalidates all identity, access and refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.

Your app isn't aware that a user's access token is revoked unless it attempts to authorize a user pools API request with an access token that contains the scope aws.cognito.signin.user.admin. Your app might otherwise accept access tokens until they expire.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "InitiateAuth":{ "name":"InitiateAuth", @@ -1343,7 +1360,7 @@ {"shape":"InvalidSmsRoleTrustRelationshipException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "ListDevices":{ @@ -1366,7 +1383,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Lists the sign-in devices that Amazon Cognito has registered to the current user.

" + "documentation":"

Lists the sign-in devices that Amazon Cognito has registered to the current user.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "ListGroups":{ "name":"ListGroups", @@ -1383,7 +1400,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the groups associated with a user pool.

Calling this action requires developer credentials.

" + "documentation":"

Lists the groups associated with a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "ListIdentityProviders":{ "name":"ListIdentityProviders", @@ -1400,7 +1417,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists information about all IdPs for a user pool.

" + "documentation":"

Lists information about all IdPs for a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "ListResourceServers":{ "name":"ListResourceServers", @@ -1417,7 +1434,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the resource servers for a user pool.

" + "documentation":"

Lists the resource servers for a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1451,7 +1468,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the user import jobs.

" + "documentation":"

Lists user import jobs for a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "ListUserPoolClients":{ "name":"ListUserPoolClients", @@ -1468,7 +1485,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the clients that have been created for the specified user pool.

" + "documentation":"

Lists the clients that have been created for the specified user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "ListUserPools":{ "name":"ListUserPools", @@ -1484,7 +1501,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the user pools associated with an Amazon Web Services account.

" + "documentation":"

Lists the user pools associated with an Amazon Web Services account.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "ListUsers":{ "name":"ListUsers", @@ -1501,7 +1518,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the users in the Amazon Cognito user pool.

" + "documentation":"

Lists users and their basic details in a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "ListUsersInGroup":{ "name":"ListUsersInGroup", @@ -1518,7 +1535,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the users in the specified group.

Calling this action requires developer credentials.

" + "documentation":"

Lists the users in the specified group.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "ResendConfirmationCode":{ "name":"ResendConfirmationCode", @@ -1545,7 +1562,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Resends the confirmation (for confirmation of registration) to a specific user in the user pool.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Resends the confirmation (for confirmation of registration) to a specific user in the user pool.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "RespondToAuthChallenge":{ @@ -1579,7 +1596,7 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Responds to the authentication challenge.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Responds to the authentication challenge.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "RevokeToken":{ @@ -1599,7 +1616,24 @@ {"shape":"UnsupportedTokenTypeException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.

" + "documentation":"

Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" + }, + "SetLogDeliveryConfiguration":{ + "name":"SetLogDeliveryConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetLogDeliveryConfigurationRequest"}, + "output":{"shape":"SetLogDeliveryConfigurationResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InternalErrorException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotAuthorizedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Sets up or modifies the detailed activity logging configuration of a user pool.

" }, "SetRiskConfiguration":{ "name":"SetRiskConfiguration", @@ -1656,7 +1690,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

" + "documentation":"

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "SetUserPoolMfaConfig":{ "name":"SetUserPoolMfaConfig", @@ -1675,7 +1709,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Sets the user pool multi-factor authentication (MFA) configuration.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

" + "documentation":"

Sets the user pool multi-factor authentication (MFA) configuration.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

" }, "SetUserSettings":{ "name":"SetUserSettings", @@ -1695,7 +1729,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.

", + "documentation":"

This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", "authtype":"none" }, "SignUp":{ @@ -1723,7 +1757,7 @@ {"shape":"CodeDeliveryFailureException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Registers the user in the specified user pool and creates a user name, password, and user attributes.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Registers the user in the specified user pool and creates a user name, password, and user attributes.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "StartUserImportJob":{ @@ -1813,7 +1847,7 @@ {"shape":"UserPoolAddOnNotEnabledException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

" + "documentation":"

Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "UpdateDeviceStatus":{ "name":"UpdateDeviceStatus", @@ -1835,7 +1869,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Updates the device status.

" + "documentation":"

Updates the device status.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "UpdateGroup":{ "name":"UpdateGroup", @@ -1852,7 +1886,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Updates the specified group with the specified attributes.

Calling this action requires developer credentials.

" + "documentation":"

Updates the specified group with the specified attributes.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "UpdateIdentityProvider":{ "name":"UpdateIdentityProvider", @@ -1871,7 +1905,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Updates IdP information for a user pool.

" + "documentation":"

Updates IdP information for a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "UpdateResourceServer":{ "name":"UpdateResourceServer", @@ -1888,7 +1922,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Updates the name and scopes of resource server. All other fields are read-only.

If you don't provide a value for an attribute, it is set to the default value.

" + "documentation":"

Updates the name and scopes of a resource server. All other fields are read-only.

If you don't provide a value for an attribute, it is set to the default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "UpdateUserAttributes":{ "name":"UpdateUserAttributes", @@ -1919,7 +1953,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Allows a user to update a specific attribute (one at a time).

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Allows a user to update a specific attribute (one at a time).

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "UpdateUserPool":{ @@ -1943,7 +1977,7 @@ {"shape":"UserPoolTaggingException"}, {"shape":"InvalidEmailRoleAccessPolicyException"} ], - "documentation":"

Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool. If you don't provide a value for an attribute, it will be set to the default value.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

" + "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings using DescribeUserPool.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "UpdateUserPoolClient":{ "name":"UpdateUserPoolClient", @@ -1963,7 +1997,7 @@ {"shape":"InvalidOAuthFlowException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Updates the specified user pool app client with the specified attributes. You can get a list of the current user pool app client settings using DescribeUserPoolClient.

If you don't provide a value for an attribute, it will be set to the default value.

You can also use this operation to enable token revocation for user pool clients. For more information about revoking tokens, see RevokeToken.

" + "documentation":"

Updates the specified user pool app client with the specified attributes. You can get a list of the current user pool app client settings using DescribeUserPoolClient.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

You can also use this operation to enable token revocation for user pool clients. For more information about revoking tokens, see RevokeToken.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "UpdateUserPoolDomain":{ "name":"UpdateUserPoolDomain", @@ -1980,7 +2014,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool.

You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You can't use it to change the domain for a user pool.

A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain.

Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically.

However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito.

When you add your new certificate in ACM, you must choose US East (N. Virginia) as the Amazon Web Services Region.

After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.

For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.

" + "documentation":"

Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool.

You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You can't use it to change the domain for a user pool.

A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain.

Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically.

However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito.

When you add your new certificate in ACM, you must choose US East (N. Virginia) as the Amazon Web Services Region.

After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.

For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "VerifySoftwareToken":{ "name":"VerifySoftwareToken", @@ -2006,7 +2040,7 @@ {"shape":"CodeMismatchException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

" + "documentation":"

Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

" }, "VerifyUserAttribute":{ "name":"VerifyUserAttribute", @@ -2031,12 +2065,16 @@ {"shape":"AliasExistsException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Verifies the specified user attributes in the user pool.

If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.

", + "documentation":"

Verifies the specified user attributes in the user pool.

If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", "authtype":"none" } }, "shapes":{ - "AWSAccountIdType":{"type":"string"}, + "AWSAccountIdType":{ + "type":"string", + "max":12, + "pattern":"[0-9]+" + }, "AccessTokenValidityType":{ "type":"integer", "max":86400, @@ -2179,7 +2217,7 @@ "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

If your user pool configuration includes triggers, the AdminConfirmSignUp API action invokes the Lambda function that is specified for the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. In this payload, the clientMetadata attribute provides the data that you assigned to the ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in Lambda, you can process the ClientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

  • Store the ClientMetadata value. This data is available only to Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration doesn't include triggers, the ClientMetadata parameter serves no purpose.

  • Validate the ClientMetadata value.

  • Encrypt the ClientMetadata value. Don't use Amazon Cognito to provide sensitive information.

" } }, - "documentation":"

Represents the request to confirm user registration.

" + "documentation":"

Confirm a user's registration as a user pool administrator.

" }, "AdminConfirmSignUpResponse":{ "type":"structure", @@ -2230,7 +2268,7 @@ }, "TemporaryPassword":{ "shape":"PasswordType", - "documentation":"

The user's temporary password. This password must conform to the password policy that you specified when you created the user pool.

The temporary password is valid only once. To complete the Admin Create User flow, the user must enter the temporary password in the sign-in page, along with a new password to be used in all future sign-ins.

This parameter isn't required. If you don't specify a value, Amazon Cognito generates one for you.

The temporary password can only be used until the user account expiration limit that you specified when you created the user pool. To reset the account after that time limit, you must call AdminCreateUser again, specifying \"RESEND\" for the MessageAction parameter.

" + "documentation":"

The user's temporary password. This password must conform to the password policy that you specified when you created the user pool.

The temporary password is valid only once. To complete the Admin Create User flow, the user must enter the temporary password in the sign-in page, along with a new password to be used in all future sign-ins.

This parameter isn't required. If you don't specify a value, Amazon Cognito generates one for you.

The temporary password can only be used until the user account expiration limit that you set for your user pool. To reset the account after that time limit, you must call AdminCreateUser again and specify RESEND for the MessageAction parameter.

" }, "ForceAliasCreation":{ "shape":"ForceAliasCreation", @@ -2464,7 +2502,7 @@ "members":{ "Username":{ "shape":"UsernameType", - "documentation":"

The user name of the user about whom you're receiving information.

" + "documentation":"

The username of the user that you requested.

" }, "UserAttributes":{ "shape":"AttributeListType", @@ -2476,7 +2514,7 @@ }, "UserLastModifiedDate":{ "shape":"DateType", - "documentation":"

The date the user was last modified.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" }, "Enabled":{ "shape":"BooleanType", @@ -2484,7 +2522,7 @@ }, "UserStatus":{ "shape":"UserStatusType", - "documentation":"

The user status. Can be one of the following:

  • UNCONFIRMED - User has been created but not confirmed.

  • CONFIRMED - User has been confirmed.

  • ARCHIVED - User is no longer active.

  • UNKNOWN - User status isn't known.

  • RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in.

  • FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else.

" + "documentation":"

The user status. Can be one of the following:

  • UNCONFIRMED - User has been created but not confirmed.

  • CONFIRMED - User has been confirmed.

  • UNKNOWN - User status isn't known.

  • RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in.

  • FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else.

" }, "MFAOptions":{ "shape":"MFAOptionListType", @@ -2523,7 +2561,7 @@ }, "AuthParameters":{ "shape":"AuthParametersType", - "documentation":"

The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow:

  • For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For ADMIN_NO_SRP_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), PASSWORD (required), DEVICE_KEY.

  • For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value).

" + "documentation":"

The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow:

  • For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For ADMIN_USER_PASSWORD_AUTH: USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value).

For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool.

" }, "ClientMetadata":{ "shape":"ClientMetadataType", @@ -2576,11 +2614,11 @@ }, "DestinationUser":{ "shape":"ProviderUserIdentifierType", - "documentation":"

The existing user in the user pool that you want to assign to the external IdP user account. This user can be a native (Username + Password) Amazon Cognito user pools user or a federated user (for example, a SAML or Facebook user). If the user doesn't exist, Amazon Cognito generates an exception. Amazon Cognito returns this user when the new user (with the linked IdP attribute) signs in.

For a native username + password user, the ProviderAttributeValue for the DestinationUser should be the username in the user pool. For a federated user, it should be the provider-specific user_id.

The ProviderAttributeName of the DestinationUser is ignored.

The ProviderName should be set to Cognito for users in Cognito user pools.

All attributes in the DestinationUser profile must be mutable. If you have assigned the user any immutable custom attributes, the operation won't succeed.

" + "documentation":"

The existing user in the user pool that you want to assign to the external IdP user account. This user can be a local (Username + Password) Amazon Cognito user pools user or a federated user (for example, a SAML or Facebook user). If the user doesn't exist, Amazon Cognito generates an exception. Amazon Cognito returns this user when the new user (with the linked IdP attribute) signs in.

For a native username + password user, the ProviderAttributeValue for the DestinationUser should be the username in the user pool. For a federated user, it should be the provider-specific user_id.

The ProviderAttributeName of the DestinationUser is ignored.

The ProviderName should be set to Cognito for users in Cognito user pools.

All attributes in the DestinationUser profile must be mutable. If you have assigned the user any immutable custom attributes, the operation won't succeed.

" }, "SourceUser":{ "shape":"ProviderUserIdentifierType", - "documentation":"

An external IdP account for a user who doesn't exist yet in the user pool. This user must be a federated user (for example, a SAML or Facebook user), not another native user.

If the SourceUser is using a federated social IdP, such as Facebook, Google, or Login with Amazon, you must set the ProviderAttributeName to Cognito_Subject. For social IdPs, the ProviderName will be Facebook, Google, or LoginWithAmazon, and Amazon Cognito will automatically parse the Facebook, Google, and Login with Amazon tokens for id, sub, and user_id, respectively. The ProviderAttributeValue for the user must be the same value as the id, sub, or user_id value found in the social IdP token.

For SAML, the ProviderAttributeName can be any value that matches a claim in the SAML assertion. If you want to link SAML users based on the subject of the SAML assertion, you should map the subject to a claim through the SAML IdP and submit that claim name as the ProviderAttributeName. If you set ProviderAttributeName to Cognito_Subject, Amazon Cognito will automatically parse the default unique identifier found in the subject from the SAML token.

" + "documentation":"

An external IdP account for a user who doesn't exist yet in the user pool. This user must be a federated user (for example, a SAML or Facebook user), not another native user.

If the SourceUser is using a federated social IdP, such as Facebook, Google, or Login with Amazon, you must set the ProviderAttributeName to Cognito_Subject. For social IdPs, the ProviderName will be Facebook, Google, or LoginWithAmazon, and Amazon Cognito will automatically parse the Facebook, Google, and Login with Amazon tokens for id, sub, and user_id, respectively. The ProviderAttributeValue for the user must be the same value as the id, sub, or user_id value found in the social IdP token.

For OIDC, the ProviderAttributeName can be any value that matches a claim in the ID token, or that your app retrieves from the userInfo endpoint. You must map the claim to a user pool attribute in your IdP configuration, and set the user pool attribute name as the value of ProviderAttributeName in your AdminLinkProviderForUser request.

For SAML, the ProviderAttributeName can be any value that matches a claim in the SAML assertion. To link SAML users based on the subject of the SAML assertion, map the subject to a claim through the SAML IdP and set that claim name as the value of ProviderAttributeName in your AdminLinkProviderForUser request.

For both OIDC and SAML users, when you set ProviderAttributeName to Cognito_Subject, Amazon Cognito will automatically parse the default unique identifier found in the subject from the IdP token.

" } } }, @@ -2777,7 +2815,7 @@ }, "ChallengeResponses":{ "shape":"ChallengeResponsesType", - "documentation":"

The challenge responses. These are inputs corresponding to the value of ChallengeName, for example:

  • SMS_MFA: SMS_MFA_CODE, USERNAME, SECRET_HASH (if app client is configured with client secret).

  • PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME, SECRET_HASH (if app client is configured with client secret).

    PASSWORD_VERIFIER requires DEVICE_KEY when signing in with a remembered device.

  • ADMIN_NO_SRP_AUTH: PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret).

  • NEW_PASSWORD_REQUIRED: NEW_PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required attributes that Amazon Cognito returned as requiredAttributes in the AdminInitiateAuth response, add a userAttributes.attributename parameter. This parameter can also set values for writable attributes that aren't required by your user pool.

    In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. In AdminRespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the AdminUpdateUserAttributes API operation to modify the value of any additional attributes.

  • MFA_SETUP requires USERNAME, plus you must use the session value returned by VerifySoftwareToken in the Session parameter.

The value of the USERNAME attribute must be the user's actual username, not an alias (such as an email address or phone number). To make this simpler, the AdminInitiateAuth response includes the actual username value in the USERNAMEUSER_ID_FOR_SRP attribute. This happens even if you specified an alias in your call to AdminInitiateAuth.

" + "documentation":"

The challenge responses. These are inputs corresponding to the value of ChallengeName, for example:

  • SMS_MFA: SMS_MFA_CODE, USERNAME, SECRET_HASH (if app client is configured with client secret).

  • PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME, SECRET_HASH (if app client is configured with client secret).

    PASSWORD_VERIFIER requires DEVICE_KEY when signing in with a remembered device.

  • ADMIN_NO_SRP_AUTH: PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret).

  • NEW_PASSWORD_REQUIRED: NEW_PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required attributes that Amazon Cognito returned as requiredAttributes in the AdminInitiateAuth response, add a userAttributes.attributename parameter. This parameter can also set values for writable attributes that aren't required by your user pool.

    In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. In AdminRespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the AdminUpdateUserAttributes API operation to modify the value of any additional attributes.

  • MFA_SETUP requires USERNAME, plus you must use the session value returned by VerifySoftwareToken in the Session parameter.

The value of the USERNAME attribute must be the user's actual username, not an alias (such as an email address or phone number). To make this simpler, the AdminInitiateAuth response includes the actual username value in the USER_ID_FOR_SRP attribute. This happens even if you specified an alias in your call to AdminInitiateAuth.

For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool.

" }, "Session":{ "shape":"SessionType", @@ -2933,7 +2971,7 @@ }, "FeedbackValue":{ "shape":"FeedbackValueType", - "documentation":"

The authentication event feedback value.

" + "documentation":"

The authentication event feedback value. When you provide a FeedbackValue value of valid, you tell Amazon Cognito that you trust a user session where Amazon Cognito has evaluated some level of risk. When you provide a FeedbackValue value of invalid, you tell Amazon Cognito that you don't trust a user session, or you don't believe that Amazon Cognito evaluated a high-enough risk level.

" } } }, @@ -3087,7 +3125,7 @@ "documentation":"

If UserDataShared is true, Amazon Cognito includes user data in the events that it publishes to Amazon Pinpoint analytics.

" } }, - "documentation":"

The Amazon Pinpoint analytics configuration necessary to collect metrics for a user pool.

In Regions where Amazon Pinpointisn't available, user pools only support sending events to Amazon Pinpoint projects in us-east-1. In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region.

" + "documentation":"

The Amazon Pinpoint analytics configuration necessary to collect metrics for a user pool.

In Regions where Amazon Pinpoint isn't available, user pools only support sending events to Amazon Pinpoint projects in us-east-1. In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region.

" }, "AnalyticsMetadataType":{ "type":"structure", @@ -3201,7 +3239,7 @@ }, "CreationDate":{ "shape":"DateType", - "documentation":"

The creation date

" + "documentation":"

The date and time, in ISO 8601 format, when the item was created.

" }, "EventResponse":{ "shape":"EventResponseType", @@ -3289,7 +3327,11 @@ "max":200 }, "BooleanType":{"type":"boolean"}, - "CSSType":{"type":"string"}, + "CSSType":{ + "type":"string", + "max":131072, + "min":0 + }, "CSSVersionType":{"type":"string"}, "CallbackURLsListType":{ "type":"list", @@ -3352,8 +3394,7 @@ "ChallengeResponsesType":{ "type":"map", "key":{"shape":"StringType"}, - "value":{"shape":"StringType"}, - "sensitive":true + "value":{"shape":"StringType"} }, "ChangePasswordRequest":{ "type":"structure", @@ -3418,6 +3459,16 @@ "pattern":"[\\w+]+", "sensitive":true }, + "CloudWatchLogsConfigurationType":{ + "type":"structure", + "members":{ + "LogGroupArn":{ + "shape":"ArnType", + "documentation":"

The Amazon Resource Name (ARN) of a CloudWatch Logs log group where your user pool sends logs. The log group must not be encrypted with Key Management Service and must be in the same Amazon Web Services account as your user pool.

" + } + }, + "documentation":"

The CloudWatch logging destination of a user pool detailed activity logging configuration.

" + }, "CodeDeliveryDetailsListType":{ "type":"list", "member":{"shape":"CodeDeliveryDetailsType"} @@ -3563,7 +3614,7 @@ }, "SecretHash":{ "shape":"SecretHashType", - "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.

" + "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values.

" }, "Username":{ "shape":"UsernameType", @@ -3866,7 +3917,7 @@ }, "IdTokenValidity":{ "shape":"IdTokenValidityType", - "documentation":"

The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.

For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.

The default time unit for AccessTokenValidity in an API request is hours. Valid range is displayed below in seconds.

If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.

" + "documentation":"

The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.

For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.

The default time unit for IdTokenValidity in an API request is hours. Valid range is displayed below in seconds.

If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.

" }, "TokenValidityUnits":{ "shape":"TokenValidityUnitsType", @@ -3910,7 +3961,7 @@ }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", - "documentation":"

Set to true if the client is allowed to follow the OAuth protocol when interacting with Amazon Cognito user pools.

" + "documentation":"

Set to true to use OAuth 2.0 features in your user pool app client.

AllowedOAuthFlowsUserPoolClient must be true before you can configure the following features in your app client.

  • CallbackURLs: Callback URLs.

  • LogoutURLs: Sign-out redirect URLs.

  • AllowedOAuthScopes: OAuth 2.0 scopes.

  • AllowedOAuthFlows: Support for authorization code, implicit, and client credentials OAuth 2.0 grants.

To use OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set AllowedOAuthFlowsUserPoolClient to true in a CreateUserPoolClient or UpdateUserPoolClient API request. If you don't set a value for AllowedOAuthFlowsUserPoolClient in a request with the CLI or SDKs, it defaults to false.

" }, "AnalyticsConfiguration":{ "shape":"AnalyticsConfigurationType", @@ -4061,11 +4112,11 @@ }, "UserPoolAddOns":{ "shape":"UserPoolAddOnsType", - "documentation":"

Enables advanced security risk detection. Set the key AdvancedSecurityMode to the value \"AUDIT\".

" + "documentation":"

User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED.

For more information, see Adding advanced security to a user pool.

" }, "UsernameConfiguration":{ "shape":"UsernameConfigurationType", - "documentation":"

Case sensitivity on the username input for the selected sign-in option. For example, when case sensitivity is set to False, users can sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set. For more information, see UsernameConfigurationType.

" + "documentation":"

Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to False (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, username, USERNAME, or UserName, or for email, email@example.com or EMaiL@eXamplE.Com. For most use cases, set case sensitivity to False (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.

This configuration is immutable after you set it. For more information, see UsernameConfigurationType.

" }, "AccountRecoverySetting":{ "shape":"AccountRecoverySettingType", @@ -4559,7 +4610,7 @@ }, "DeviceLastModifiedDate":{ "shape":"DateType", - "documentation":"

The last modified date of the device.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" }, "DeviceLastAuthenticatedDate":{ "shape":"DateType", @@ -4589,7 +4640,7 @@ }, "CloudFrontDistribution":{ "shape":"StringType", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon CloudFront distribution.

" + "documentation":"

The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider.

" }, "Version":{ "shape":"DomainVersionType", @@ -4644,7 +4695,7 @@ "members":{ "SourceArn":{ "shape":"ArnType", - "documentation":"

The ARN of a verified email address in Amazon SES. Amazon Cognito uses this email address in one of the following ways, depending on the value that you specify for the EmailSendingAccount parameter:

  • If you specify COGNITO_DEFAULT, Amazon Cognito uses this address as the custom FROM address when it emails your users using its built-in email account.

  • If you specify DEVELOPER, Amazon Cognito emails your users with this address by calling Amazon SES on your behalf.

The Region value of the SourceArn parameter must indicate a supported Amazon Web Services Region of your user pool. Typically, the Region in the SourceArn and the user pool Region are the same. For more information, see Amazon SES email configuration regions in the Amazon Cognito Developer Guide.

" + "documentation":"

The ARN of a verified email address or an address from a verified domain in Amazon SES. You can set a SourceArn email from a verified domain only with an API request. You can set a verified email address, but not an address in a verified domain, in the Amazon Cognito console. Amazon Cognito uses the email address that you provide in one of the following ways, depending on the value that you specify for the EmailSendingAccount parameter:

  • If you specify COGNITO_DEFAULT, Amazon Cognito uses this address as the custom FROM address when it emails your users using its built-in email account.

  • If you specify DEVELOPER, Amazon Cognito emails your users with this address by calling Amazon SES on your behalf.

The Region value of the SourceArn parameter must indicate a supported Amazon Web Services Region of your user pool. Typically, the Region in the SourceArn and the user pool Region are the same. For more information, see Amazon SES email configuration regions in the Amazon Cognito Developer Guide.

" }, "ReplyToEmailAddress":{ "shape":"EmailAddressType", @@ -4751,7 +4802,7 @@ "members":{ "FeedbackValue":{ "shape":"FeedbackValueType", - "documentation":"

The event feedback value.

" + "documentation":"

The authentication event feedback value. When you provide a FeedbackValue value of valid, you tell Amazon Cognito that you trust a user session where Amazon Cognito has evaluated some level of risk. When you provide a FeedbackValue value of invalid, you tell Amazon Cognito that you don't trust a user session, or you don't believe that Amazon Cognito evaluated a high-enough risk level.

" }, "Provider":{ "shape":"StringType", @@ -4808,6 +4859,10 @@ }, "documentation":"

The event risk type.

" }, + "EventSourceName":{ + "type":"string", + "enum":["userNotification"] + }, "EventType":{ "type":"string", "enum":[ @@ -5029,6 +5084,25 @@ } } }, + "GetLogDeliveryConfigurationRequest":{ + "type":"structure", + "required":["UserPoolId"], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The ID of the user pool where you want to view detailed activity logging configuration.

" + } + } + }, + "GetLogDeliveryConfigurationResponse":{ + "type":"structure", + "members":{ + "LogDeliveryConfiguration":{ + "shape":"LogDeliveryConfigurationType", + "documentation":"

The detailed activity logging configuration of the requested user pool.

" + } + } + }, "GetSigningCertificateRequest":{ "type":"structure", "required":["UserPoolId"], @@ -5153,7 +5227,7 @@ "members":{ "Username":{ "shape":"UsernameType", - "documentation":"

The user name of the user you want to retrieve from the get user request.

" + "documentation":"

The username of the user that you requested.

" }, "UserAttributes":{ "shape":"AttributeListType", @@ -5234,11 +5308,11 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"

The date the group was last modified.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" }, "CreationDate":{ "shape":"DateType", - "documentation":"

The date the group was created.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was created.

" } }, "documentation":"

The group type.

" @@ -5299,11 +5373,11 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"

The date the IdP was last modified.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" }, "CreationDate":{ "shape":"DateType", - "documentation":"

The date the IdP was created.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was created.

" } }, "documentation":"

A container for information about an IdP.

" @@ -5331,7 +5405,11 @@ "max":50, "min":0 }, - "ImageFileType":{"type":"blob"}, + "ImageFileType":{ + "type":"blob", + "max":131072, + "min":0 + }, "ImageUrlType":{"type":"string"}, "InitiateAuthRequest":{ "type":"structure", @@ -5346,7 +5424,7 @@ }, "AuthParameters":{ "shape":"AuthParametersType", - "documentation":"

The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow:

  • For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value).

" + "documentation":"

The authentication parameters. These are inputs corresponding to the AuthFlow that you're invoking. The required values depend on the value of AuthFlow:

  • For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For USER_PASSWORD_AUTH: USERNAME (required), PASSWORD (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH (required if the app client is configured with a client secret), DEVICE_KEY.

  • For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with client secret), DEVICE_KEY. To start the authentication flow with password verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value).

For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool.

" }, "ClientMetadata":{ "shape":"ClientMetadataType", @@ -5863,7 +5941,7 @@ }, "AttributesToGet":{ "shape":"SearchedAttributeNamesListType", - "documentation":"

An array of strings, where each string is the name of a user attribute to be returned for each user in the search results. If the array is null, all attributes are returned.

" + "documentation":"

A JSON array of user attribute names, for example given_name, that you want Amazon Cognito to include in the response for each user. When you don't provide an AttributesToGet parameter, Amazon Cognito returns all attributes for each user.

" }, "Limit":{ "shape":"QueryLimitType", @@ -5885,7 +5963,7 @@ "members":{ "Users":{ "shape":"UsersListType", - "documentation":"

The users returned in the request to list users.

" + "documentation":"

A list of the user pool users, and their attributes, that match your query.

Amazon Cognito creates a profile in your user pool for each native user in your user pool, and each unique user ID from your third-party identity providers (IdPs). When you link users with the AdminLinkProviderForUser API operation, the output of ListUsers displays both the IdP user and the native user that you linked. You can identify IdP users in the Users object of this API response by the IdP prefix that Amazon Cognito appends to Username.

" }, "PaginationToken":{ "shape":"SearchPaginationTokenType", @@ -5894,6 +5972,56 @@ }, "documentation":"

The response from the request to list users.

" }, + "LogConfigurationListType":{ + "type":"list", + "member":{"shape":"LogConfigurationType"}, + "max":1, + "min":0 + }, + "LogConfigurationType":{ + "type":"structure", + "required":[ + "LogLevel", + "EventSource" + ], + "members":{ + "LogLevel":{ + "shape":"LogLevel", + "documentation":"

The error level selection of logs that a user pool sends for detailed activity logging.

" + }, + "EventSource":{ + "shape":"EventSourceName", + "documentation":"

The source of events that your user pool sends for detailed activity logging.

" + }, + "CloudWatchLogsConfiguration":{ + "shape":"CloudWatchLogsConfigurationType", + "documentation":"

The CloudWatch logging destination of a user pool.

" + } + }, + "documentation":"

The logging parameters of a user pool.

" + }, + "LogDeliveryConfigurationType":{ + "type":"structure", + "required":[ + "UserPoolId", + "LogConfigurations" + ], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The ID of the user pool where you configured detailed activity logging.

" + }, + "LogConfigurations":{ + "shape":"LogConfigurationListType", + "documentation":"

The detailed activity logging destination of a user pool.

" + } + }, + "documentation":"

The logging parameters of a user pool.

" + }, + "LogLevel":{ + "type":"string", + "enum":["ERROR"] + }, "LogoutURLsListType":{ "type":"list", "member":{"shape":"RedirectUrlType"}, @@ -6175,7 +6303,7 @@ }, "CreationDate":{ "shape":"DateType", - "documentation":"

The date the provider was added to the user pool.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was created.

" } }, "documentation":"

A container for IdP details.

" @@ -6427,7 +6555,7 @@ }, "ChallengeResponses":{ "shape":"ChallengeResponsesType", - "documentation":"

The challenge responses. These are inputs corresponding to the value of ChallengeName, for example:

SECRET_HASH (if app client is configured with client secret) applies to all of the inputs that follow (including SOFTWARE_TOKEN_MFA).

  • SMS_MFA: SMS_MFA_CODE, USERNAME.

  • PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME.

    PASSWORD_VERIFIER requires DEVICE_KEY when you sign in with a remembered device.

  • NEW_PASSWORD_REQUIRED: NEW_PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required attributes that Amazon Cognito returned as requiredAttributes in the InitiateAuth response, add a userAttributes.attributename parameter. This parameter can also set values for writable attributes that aren't required by your user pool.

    In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. In RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the UpdateUserAttributes API operation to modify the value of any additional attributes.

  • SOFTWARE_TOKEN_MFA: USERNAME and SOFTWARE_TOKEN_MFA_CODE are required attributes.

  • DEVICE_SRP_AUTH requires USERNAME, DEVICE_KEY, SRP_A (and SECRET_HASH).

  • DEVICE_PASSWORD_VERIFIER requires everything that PASSWORD_VERIFIER requires, plus DEVICE_KEY.

  • MFA_SETUP requires USERNAME, plus you must use the session value returned by VerifySoftwareToken in the Session parameter.

" + "documentation":"

The challenge responses. These are inputs corresponding to the value of ChallengeName, for example:

SECRET_HASH (if app client is configured with client secret) applies to all of the inputs that follow (including SOFTWARE_TOKEN_MFA).

  • SMS_MFA: SMS_MFA_CODE, USERNAME.

  • PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME.

    PASSWORD_VERIFIER requires DEVICE_KEY when you sign in with a remembered device.

  • NEW_PASSWORD_REQUIRED: NEW_PASSWORD, USERNAME, SECRET_HASH (if app client is configured with client secret). To set any required attributes that Amazon Cognito returned as requiredAttributes in the InitiateAuth response, add a userAttributes.attributename parameter. This parameter can also set values for writable attributes that aren't required by your user pool.

    In a NEW_PASSWORD_REQUIRED challenge response, you can't modify a required attribute that already has a value. In RespondToAuthChallenge, set a value for any keys that Amazon Cognito returned in the requiredAttributes parameter, then use the UpdateUserAttributes API operation to modify the value of any additional attributes.

  • SOFTWARE_TOKEN_MFA: USERNAME and SOFTWARE_TOKEN_MFA_CODE are required attributes.

  • DEVICE_SRP_AUTH requires USERNAME, DEVICE_KEY, SRP_A (and SECRET_HASH).

  • DEVICE_PASSWORD_VERIFIER requires everything that PASSWORD_VERIFIER requires, plus DEVICE_KEY.

  • MFA_SETUP requires USERNAME, plus you must use the session value returned by VerifySoftwareToken in the Session parameter.

For more information about SECRET_HASH, see Computing secret hash values. For information about DEVICE_KEY, see Working with user devices in your user pool.

" }, "AnalyticsMetadata":{ "shape":"AnalyticsMetadataType", @@ -6517,7 +6645,7 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"

The last modified date.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" } }, "documentation":"

The risk configuration type.

" @@ -6583,11 +6711,11 @@ "members":{ "Name":{ "shape":"CustomAttributeNameType", - "documentation":"

A schema attribute of the name type.

" + "documentation":"

The name of your user pool attribute, for example username or custom:costcenter.

" }, "AttributeDataType":{ "shape":"AttributeDataType", - "documentation":"

The attribute data type.

" + "documentation":"

The data format of the values for your attribute.

" }, "DeveloperOnlyAttribute":{ "shape":"BooleanType", @@ -6596,7 +6724,7 @@ }, "Mutable":{ "shape":"BooleanType", - "documentation":"

Specifies whether the value of the attribute can be changed.

For any user pool attribute that is mapped to an IdP attribute, you must set this parameter to true. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If an attribute is immutable, Amazon Cognito throws an error when it attempts to update the attribute. For more information, see Specifying Identity Provider Attribute Mappings for Your User Pool.

", + "documentation":"

Specifies whether the value of the attribute can be changed.

Any user pool attribute whose value you map from an IdP attribute must be mutable, with a parameter value of true. Amazon Cognito updates mapped attributes when users sign in to your application through an IdP. If an attribute is immutable, Amazon Cognito throws an error when it attempts to update the attribute. For more information, see Specifying Identity Provider Attribute Mappings for Your User Pool.

", "box":true }, "Required":{ @@ -6613,7 +6741,7 @@ "documentation":"

Specifies the constraints for an attribute of the string type.

" } }, - "documentation":"

Contains information about the schema attribute.

" + "documentation":"

A list of the user attributes and their properties in your user pool. The attribute schema contains standard attributes, custom attributes with a custom: prefix, and developer attributes with a dev: prefix. For more information, see User pool attributes.

Developer-only attributes are a legacy feature of user pools and are read-only to all app clients. You can create and update developer-only attributes only with IAM-authenticated API operations. Use app client read/write permissions instead.

" }, "SchemaAttributesListType":{ "type":"list", @@ -6665,8 +6793,33 @@ "SessionType":{ "type":"string", "max":2048, - "min":20, - "sensitive":true + "min":20 + }, + "SetLogDeliveryConfigurationRequest":{ + "type":"structure", + "required":[ + "UserPoolId", + "LogConfigurations" + ], + "members":{ + "UserPoolId":{ + "shape":"UserPoolIdType", + "documentation":"

The ID of the user pool where you want to configure detailed activity logging.

" + }, + "LogConfigurations":{ + "shape":"LogConfigurationListType", + "documentation":"

A collection of all of the detailed activity logging configurations for a user pool.

" + } + } + }, + "SetLogDeliveryConfigurationResponse":{ + "type":"structure", + "members":{ + "LogDeliveryConfiguration":{ + "shape":"LogDeliveryConfigurationType", + "documentation":"

The detailed activity logging configuration that you applied to the requested user pool.

" + } + } }, "SetRiskConfigurationRequest":{ "type":"structure", @@ -6947,8 +7100,7 @@ "type":"string", "max":6, "min":6, - "pattern":"[0-9]+", - "sensitive":true + "pattern":"[0-9]+" }, "SoftwareTokenMfaConfigType":{ "type":"structure", @@ -7051,7 +7203,11 @@ }, "documentation":"

The constraints associated with a string attribute.

" }, - "StringType":{"type":"string"}, + "StringType":{ + "type":"string", + "max":131072, + "min":0 + }, "SupportedIdentityProvidersListType":{ "type":"list", "member":{"shape":"ProviderNameType"} @@ -7112,15 +7268,15 @@ "members":{ "AccessToken":{ "shape":"TimeUnitsType", - "documentation":"

A time unit of seconds, minutes, hours, or days for the value that you set in the AccessTokenValidity parameter. The default AccessTokenValidity time unit is hours.

" + "documentation":"

A time unit of seconds, minutes, hours, or days for the value that you set in the AccessTokenValidity parameter. The default AccessTokenValidity time unit is hours. AccessTokenValidity duration can range from five minutes to one day.

" }, "IdToken":{ "shape":"TimeUnitsType", - "documentation":"

A time unit of seconds, minutes, hours, or days for the value that you set in the IdTokenValidity parameter. The default IdTokenValidity time unit is hours.

" + "documentation":"

A time unit of seconds, minutes, hours, or days for the value that you set in the IdTokenValidity parameter. The default IdTokenValidity time unit is hours. IdTokenValidity duration can range from five minutes to one day.

" }, "RefreshToken":{ "shape":"TimeUnitsType", - "documentation":"

A time unit of seconds, minutes, hours, or days for the value that you set in the RefreshTokenValidity parameter. The default RefreshTokenValidity time unit is days.

" + "documentation":"

A time unit of seconds, minutes, hours, or days for the value that you set in the RefreshTokenValidity parameter. The default RefreshTokenValidity time unit is days. RefreshTokenValidity duration can range from 60 minutes to 10 years.

" } }, "documentation":"

The data type TokenValidityUnits specifies the time units you use when you set the duration of ID, access, and refresh tokens.

" @@ -7172,11 +7328,11 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"

The last-modified date for the UI customization.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" }, "CreationDate":{ "shape":"DateType", - "documentation":"

The creation date for the UI customization.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was created.

" } }, "documentation":"

A container for the UI customization information for a user pool's built-in app UI.

" @@ -7285,7 +7441,7 @@ }, "FeedbackValue":{ "shape":"FeedbackValueType", - "documentation":"

The authentication event feedback value.

" + "documentation":"

The authentication event feedback value. When you provide a FeedbackValue value of valid, you tell Amazon Cognito that you trust a user session where Amazon Cognito has evaluated some level of risk. When you provide a FeedbackValue value of invalid, you tell Amazon Cognito that you don't trust a user session, or you don't believe that Amazon Cognito evaluated a high-enough risk level.

" } } }, @@ -7496,11 +7652,11 @@ }, "IdTokenValidity":{ "shape":"IdTokenValidityType", - "documentation":"

The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.

For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.

The default time unit for AccessTokenValidity in an API request is hours. Valid range is displayed below in seconds.

If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.

" + "documentation":"

The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.

For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.

The default time unit for IdTokenValidity in an API request is hours. Valid range is displayed below in seconds.

If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.

" }, "TokenValidityUnits":{ "shape":"TokenValidityUnitsType", - "documentation":"

The units in which the validity times are represented. The default unit for RefreshToken is days, and the default for ID and access tokens is hours.

" + "documentation":"

The time units you use when you set the duration of ID, access, and refresh tokens. The default unit for RefreshToken is days, and the default for ID and access tokens is hours.

" }, "ReadAttributes":{ "shape":"ClientPermissionListType", @@ -7540,7 +7696,7 @@ }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", - "documentation":"

Set to true if the client is allowed to follow the OAuth protocol when interacting with Amazon Cognito user pools.

" + "documentation":"

Set to true to use OAuth 2.0 features in your user pool app client.

AllowedOAuthFlowsUserPoolClient must be true before you can configure the following features in your app client.

  • CallBackURLs: Callback URLs.

  • LogoutURLs: Sign-out redirect URLs.

  • AllowedOAuthScopes: OAuth 2.0 scopes.

  • AllowedOAuthFlows: Support for authorization code, implicit, and client credentials OAuth 2.0 grants.

To use OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set AllowedOAuthFlowsUserPoolClient to true in a CreateUserPoolClient or UpdateUserPoolClient API request. If you don't set a value for AllowedOAuthFlowsUserPoolClient in a request with the CLI or SDKs, it defaults to false.

" }, "AnalyticsConfiguration":{ "shape":"AnalyticsConfigurationType", @@ -7682,7 +7838,7 @@ }, "UserPoolAddOns":{ "shape":"UserPoolAddOnsType", - "documentation":"

Enables advanced security risk detection. Set the key AdvancedSecurityMode to the value \"AUDIT\".

" + "documentation":"

User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED.

For more information, see Adding advanced security to a user pool.

" }, "AccountRecoverySetting":{ "shape":"AccountRecoverySettingType", @@ -7702,7 +7858,7 @@ "members":{ "AttributesRequireVerificationBeforeUpdate":{ "shape":"AttributesRequireVerificationBeforeUpdateType", - "documentation":"

Requires that your user verifies their email address, phone number, or both before Amazon Cognito updates the value of that attribute. When you update a user attribute that has this option activated, Amazon Cognito sends a verification message to the new phone number or email address. Amazon Cognito doesn’t change the value of the attribute until your user responds to the verification message and confirms the new value.

You can verify an updated email address or phone number with a VerifyUserAttribute API request. You can also call the UpdateUserAttributes or AdminUpdateUserAttributes API and set email_verified or phone_number_verified to true.

When AttributesRequireVerificationBeforeUpdate is false, your user pool doesn't require that your users verify attribute changes before Amazon Cognito updates them. In a user pool where AttributesRequireVerificationBeforeUpdate is false, API operations that change attribute values can immediately update a user’s email or phone_number attribute.

" + "documentation":"

Requires that your user verifies their email address, phone number, or both before Amazon Cognito updates the value of that attribute. When you update a user attribute that has this option activated, Amazon Cognito sends a verification message to the new phone number or email address. Amazon Cognito doesn’t change the value of the attribute until your user responds to the verification message and confirms the new value.

You can verify an updated email address or phone number with a VerifyUserAttribute API request. You can also call the AdminUpdateUserAttributes API and set email_verified or phone_number_verified to true.

When AttributesRequireVerificationBeforeUpdate is false, your user pool doesn't require that your users verify attribute changes before Amazon Cognito updates them. In a user pool where AttributesRequireVerificationBeforeUpdate is false, API operations that change attribute values can immediately update a user’s email or phone_number attribute.

" } }, "documentation":"

The settings for updates to user attributes. These settings include the property AttributesRequireVerificationBeforeUpdate, a user-pool setting that tells Amazon Cognito how to handle changes to the value of your users' email address and phone number attributes. For more information, see Verifying updates to email addresses and phone numbers.

" @@ -7719,8 +7875,7 @@ "documentation":"

Encoded device-fingerprint details that your app collected with the Amazon Cognito context data collection library. For more information, see Adding user device and session data to API requests.

" } }, - "documentation":"

Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.

", - "sensitive":true + "documentation":"

Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.

" }, "UserFilterType":{ "type":"string", @@ -7783,7 +7938,7 @@ }, "CreationDate":{ "shape":"DateType", - "documentation":"

The date the user import job was created.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was created.

" }, "StartDate":{ "shape":"DateType", @@ -7877,10 +8032,10 @@ "members":{ "AdvancedSecurityMode":{ "shape":"AdvancedSecurityModeType", - "documentation":"

The advanced security mode.

" + "documentation":"

The operating mode of advanced security features in your user pool.

" } }, - "documentation":"

The user pool add-ons type.

" + "documentation":"

User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED.

For more information, see Adding advanced security to a user pool.

" }, "UserPoolClientDescription":{ "type":"structure", @@ -7925,11 +8080,11 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"

The date the user pool client was last modified.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" }, "CreationDate":{ "shape":"DateType", - "documentation":"

The date the user pool client was created.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was created.

" }, "RefreshTokenValidity":{ "shape":"RefreshTokenValidityType", @@ -7941,7 +8096,7 @@ }, "IdTokenValidity":{ "shape":"IdTokenValidityType", - "documentation":"

The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.

For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.

The default time unit for AccessTokenValidity in an API request is hours. Valid range is displayed below in seconds.

If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.

" + "documentation":"

The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for IdTokenValidity as seconds, minutes, hours, or days, set a TokenValidityUnits value in your API request.

For example, when you set IdTokenValidity as 10 and TokenValidityUnits as hours, your user can authenticate their session with their ID token for 10 hours.

The default time unit for IdTokenValidity in an API request is hours. Valid range is displayed below in seconds.

If you don't specify otherwise in the configuration of your app client, your ID tokens are valid for one hour.

" }, "TokenValidityUnits":{ "shape":"TokenValidityUnitsType", @@ -7985,7 +8140,7 @@ }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", - "documentation":"

Set to true if the client is allowed to follow the OAuth protocol when interacting with Amazon Cognito user pools.

", + "documentation":"

Set to true to use OAuth 2.0 features in your user pool app client.

AllowedOAuthFlowsUserPoolClient must be true before you can configure the following features in your app client.

  • CallbackURLs: Callback URLs.

  • LogoutURLs: Sign-out redirect URLs.

  • AllowedOAuthScopes: OAuth 2.0 scopes.

  • AllowedOAuthFlows: Support for authorization code, implicit, and client credentials OAuth 2.0 grants.

To use OAuth 2.0 features, configure one of these features in the Amazon Cognito console or set AllowedOAuthFlowsUserPoolClient to true in a CreateUserPoolClient or UpdateUserPoolClient API request. If you don't set a value for AllowedOAuthFlowsUserPoolClient in a request with the CLI or SDKs, it defaults to false.

", "box":true }, "AnalyticsConfiguration":{ @@ -8032,11 +8187,11 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"

The date the user pool description was last modified.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" }, "CreationDate":{ "shape":"DateType", - "documentation":"

The date the user pool description was created.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was created.

" } }, "documentation":"

A user pool description.

" @@ -8121,15 +8276,15 @@ }, "LastModifiedDate":{ "shape":"DateType", - "documentation":"

The date the user pool was last modified.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" }, "CreationDate":{ "shape":"DateType", - "documentation":"

The date the user pool was created.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was created.

" }, "SchemaAttributes":{ "shape":"SchemaAttributesListType", - "documentation":"

A container with the schema attributes of a user pool.

" + "documentation":"

A list of the user attributes and their properties in your user pool. The attribute schema contains standard attributes, custom attributes with a custom: prefix, and developer attributes with a dev: prefix. For more information, see User pool attributes.

Developer-only attributes are a legacy feature of user pools, and are read-only to all app clients. You can create and update developer-only attributes only with IAM-authenticated API operations. Use app client read/write permissions instead.

" }, "AutoVerifiedAttributes":{ "shape":"VerifiedAttributesListType", @@ -8181,7 +8336,7 @@ }, "EmailConfiguration":{ "shape":"EmailConfigurationType", - "documentation":"

The email configuration of your user pool. The email configuration type sets your preferred sending method, Amazon Web Services Region, and sender for messages tfrom your user pool.

" + "documentation":"

The email configuration of your user pool. The email configuration type sets your preferred sending method, Amazon Web Services Region, and sender for messages from your user pool.

" }, "SmsConfiguration":{ "shape":"SmsConfigurationType", @@ -8193,7 +8348,7 @@ }, "SmsConfigurationFailure":{ "shape":"StringType", - "documentation":"

The reason why the SMS configuration can't send the messages to your users.

This message might include comma-separated values to describe why your SMS configuration can't send messages to user pool end users.

InvalidSmsRoleAccessPolicyException

The Identity and Access Management role that Amazon Cognito uses to send SMS messages isn't properly configured. For more information, see SmsConfigurationType.

SNSSandbox

The Amazon Web Services account is in the SNS SMS Sandbox and messages will only reach verified end users. This parameter won’t get populated with SNSSandbox if the IAM user creating the user pool doesn’t have SNS permissions. To learn how to move your Amazon Web Services account out of the sandbox, see Moving out of the SMS sandbox.

" + "documentation":"

The reason why the SMS configuration can't send the messages to your users.

This message might include comma-separated values to describe why your SMS configuration can't send messages to user pool end users.

InvalidSmsRoleAccessPolicyException

The Identity and Access Management role that Amazon Cognito uses to send SMS messages isn't properly configured. For more information, see SmsConfigurationType.

SNSSandbox

The Amazon Web Services account is in the SNS SMS Sandbox and messages will only reach verified end users. This parameter won’t get populated with SNSSandbox if the user creating the user pool doesn’t have SNS permissions. To learn how to move your Amazon Web Services account out of the sandbox, see Moving out of the SMS sandbox.

" }, "EmailConfigurationFailure":{ "shape":"StringType", @@ -8213,7 +8368,7 @@ }, "UserPoolAddOns":{ "shape":"UserPoolAddOnsType", - "documentation":"

The user pool add-ons.

" + "documentation":"

User pool add-ons. Contains settings for activation of advanced security features. To log user security information but take no action, set to AUDIT. To configure automatic security responses to risky traffic to your user pool, set to ENFORCED.

For more information, see Adding advanced security to a user pool.

" }, "UsernameConfiguration":{ "shape":"UsernameConfigurationType", @@ -8259,7 +8414,7 @@ }, "UserLastModifiedDate":{ "shape":"DateType", - "documentation":"

The last modified date of the user.

" + "documentation":"

The date and time, in ISO 8601 format, when the item was modified.

" }, "Enabled":{ "shape":"BooleanType", @@ -8267,7 +8422,7 @@ }, "UserStatus":{ "shape":"UserStatusType", - "documentation":"

The user status. This can be one of the following:

  • UNCONFIRMED - User has been created but not confirmed.

  • CONFIRMED - User has been confirmed.

  • EXTERNAL_PROVIDER - User signed in with a third-party IdP.

  • ARCHIVED - User is no longer active.

  • UNKNOWN - User status isn't known.

  • RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in.

  • FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else.

" + "documentation":"

The user status. This can be one of the following:

  • UNCONFIRMED - User has been created but not confirmed.

  • CONFIRMED - User has been confirmed.

  • EXTERNAL_PROVIDER - User signed in with a third-party IdP.

  • UNKNOWN - User status isn't known.

  • RESET_REQUIRED - User is confirmed, but the user must request a code and reset their password before they can sign in.

  • FORCE_CHANGE_PASSWORD - The user is confirmed and the user can sign in using a temporary password, but on first sign-in, the user must change their password to a new value before doing anything else.

" }, "MFAOptions":{ "shape":"MFAOptionListType", @@ -8293,7 +8448,7 @@ "members":{ "CaseSensitive":{ "shape":"WrappedBooleanType", - "documentation":"

Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs.

Valid values include:

True

Enables case sensitivity for all username input. When this option is set to True, users must sign in using the exact capitalization of their given username, such as “UserName”. This is the default value.

False

Enables case insensitivity for all username input. For example, when this option is set to False, users can sign in using either \"username\" or \"Username\". This option also enables both preferred_username and email alias to be case insensitive, in addition to the username attribute.

" + "documentation":"

Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs. For most use cases, set case sensitivity to False (case insensitive) as a best practice. When usernames and email addresses are case insensitive, users can sign in as the same user when they enter a different capitalization of their user name.

Valid values include:

True

Enables case sensitivity for all username input. When this option is set to True, users must sign in using the exact capitalization of their given username, such as “UserName”. This is the default value.

False

Enables case insensitivity for all username input. For example, when this option is set to False, users can sign in using username, USERNAME, or UserName. This option also enables both preferred_username and email alias to be case insensitive, in addition to the username attribute.

" } }, "documentation":"

The username configuration type.

" @@ -8434,5 +8589,5 @@ }, "WrappedBooleanType":{"type":"boolean"} }, - "documentation":"

Using the Amazon Cognito user pools API, you can create a user pool to manage directories and users. You can authenticate a user to obtain tokens related to user identity and access policies.

This API reference provides information about user pools in Amazon Cognito user pools.

For more information, see the Amazon Cognito Documentation.

" + "documentation":"

With the Amazon Cognito user pools API, you can set up user pools and app clients, and authenticate users. To authenticate users from third-party identity providers (IdPs) in this API, you can link IdP users to native user profiles. Learn more about the authentication and authorization of federated users in the Using the Amazon Cognito user pools API and user pool endpoints.

This API reference provides detailed information about API operations and object types in Amazon Cognito. At the bottom of the page for each API operation and object, under See Also, you can learn how to use it in an Amazon Web Services SDK in the language of your choice.

Along with resource management operations, the Amazon Cognito user pools API includes classes of operations and authorization models for client-side and server-side user operations. For more information, see Using the Amazon Cognito native and OIDC APIs in the Amazon Cognito Developer Guide.

You can also start reading about the CognitoIdentityProvider client in the following SDK guides.

To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services SDKs.

" } diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index edda0624437..fab179973a3 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index cb3f16a28a7..f873a7dc5c5 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index a0c6456d6a4..8881a0c3794 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index 30dae660e42..680c7793002 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/config/pom.xml b/services/config/pom.xml index 3f45dccac04..650239e3d8a 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/config/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/config/src/main/resources/codegen-resources/endpoint-rule-set.json index 3c1dcac5c67..bc269536957 100644 --- a/services/config/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/config/src/main/resources/codegen-resources/endpoint-rule-set.json 
@@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { 
- "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://config-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://config-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://config.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://config-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, 
- "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://config.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://config-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://config.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://config.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://config.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": 
"https://config.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/config/src/main/resources/codegen-resources/service-2.json b/services/config/src/main/resources/codegen-resources/service-2.json index 5b5ffb3fda3..07e42d5c60f 100644 --- a/services/config/src/main/resources/codegen-resources/service-2.json +++ b/services/config/src/main/resources/codegen-resources/service-2.json @@ -7477,7 +7477,42 @@ "AWS::SageMaker::Domain", "AWS::Transfer::Agreement", "AWS::Transfer::Connector", - "AWS::KinesisFirehose::DeliveryStream" + "AWS::KinesisFirehose::DeliveryStream", + "AWS::Amplify::Branch", + "AWS::AppIntegrations::EventIntegration", + "AWS::AppMesh::Route", + "AWS::Athena::PreparedStatement", + "AWS::EC2::IPAMScope", + "AWS::Evidently::Launch", + "AWS::Forecast::DatasetGroup", + "AWS::GreengrassV2::ComponentVersion", + "AWS::GroundStation::MissionProfile", + "AWS::MediaConnect::FlowEntitlement", + "AWS::MediaConnect::FlowVpcInterface", + "AWS::MediaTailor::PlaybackConfiguration", + "AWS::MSK::Configuration", + "AWS::Personalize::Dataset", + "AWS::Personalize::Schema", + "AWS::Personalize::Solution", + "AWS::Pinpoint::EmailTemplate", + "AWS::Pinpoint::EventStream", + "AWS::ResilienceHub::App", + "AWS::ACMPCA::CertificateAuthority", + "AWS::AppConfig::HostedConfigurationVersion", + "AWS::AppMesh::VirtualGateway", + "AWS::AppMesh::VirtualRouter", + "AWS::AppRunner::Service", + "AWS::CustomerProfiles::ObjectType", + "AWS::DMS::Endpoint", + "AWS::EC2::CapacityReservation", + "AWS::EC2::ClientVpnEndpoint", + "AWS::Kendra::Index", + "AWS::KinesisVideo::Stream", + "AWS::Logs::Destination", + "AWS::Pinpoint::EmailChannel", + "AWS::S3::AccessPoint", + 
"AWS::NetworkManager::CustomerGatewayAssociation", + "AWS::NetworkManager::LinkAssociation" ] }, "ResourceTypeList":{ diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 91828f6f999..1470a73a512 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connect/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/connect/src/main/resources/codegen-resources/endpoint-rule-set.json index 57834595dab..1f6adf2f2f3 100644 --- a/services/connect/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/connect/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": 
"Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://connect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://connect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + 
"supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://connect.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://connect-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://connect.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://connect-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://connect.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://connect.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://connect.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://connect.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/connect/src/main/resources/codegen-resources/paginators-1.json b/services/connect/src/main/resources/codegen-resources/paginators-1.json index 739a54e57da..a4012e4c41c 100644 --- a/services/connect/src/main/resources/codegen-resources/paginators-1.json +++ b/services/connect/src/main/resources/codegen-resources/paginators-1.json @@ -198,6 +198,12 @@ "output_token": "NextToken", "result_key": "TaskTemplates" }, + "ListTrafficDistributionGroupUsers": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "TrafficDistributionGroupUserSummaryList" + }, "ListTrafficDistributionGroups": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json 
b/services/connect/src/main/resources/codegen-resources/service-2.json index 99d6c7c7e89..a3a1d08c02a 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -205,6 +205,25 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Associates a security key to the instance.

" }, + "AssociateTrafficDistributionGroupUser":{ + "name":"AssociateTrafficDistributionGroupUser", + "http":{ + "method":"PUT", + "requestUri":"/traffic-distribution-group/{TrafficDistributionGroupId}/user" + }, + "input":{"shape":"AssociateTrafficDistributionGroupUserRequest"}, + "output":{"shape":"AssociateTrafficDistributionGroupUserResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Associates an agent with a traffic distribution group.

", + "idempotent":true + }, "ClaimPhoneNumber":{ "name":"ClaimPhoneNumber", "http":{ @@ -1445,6 +1464,25 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Deletes the specified security key.

" }, + "DisassociateTrafficDistributionGroupUser":{ + "name":"DisassociateTrafficDistributionGroupUser", + "http":{ + "method":"DELETE", + "requestUri":"/traffic-distribution-group/{TrafficDistributionGroupId}/user" + }, + "input":{"shape":"DisassociateTrafficDistributionGroupUserRequest"}, + "output":{"shape":"DisassociateTrafficDistributionGroupUserResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Disassociates an agent from a traffic distribution group.

", + "idempotent":true + }, "DismissUserContact":{ "name":"DismissUserContact", "http":{ @@ -1928,7 +1966,7 @@ {"shape":"InternalServiceException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists phone numbers claimed to your Amazon Connect instance or traffic distribution group. If the provided TargetArn is a traffic distribution group, you can call this API in both Amazon Web Services Regions associated with traffic distribution group.

For more information about phone numbers, see Set Up Phone Numbers for Your Contact Center in the Amazon Connect Administrator Guide.

" + "documentation":"

Lists phone numbers claimed to your Amazon Connect instance or traffic distribution group. If the provided TargetArn is a traffic distribution group, you can call this API in both Amazon Web Services Regions associated with traffic distribution group.

For more information about phone numbers, see Set Up Phone Numbers for Your Contact Center in the Amazon Connect Administrator Guide.

  • When given an instance ARN, ListPhoneNumbersV2 returns only the phone numbers claimed to the instance.

  • When given a traffic distribution group ARN, ListPhoneNumbersV2 returns only the phone numbers claimed to the traffic distribution group.

" }, "ListPrompts":{ "name":"ListPrompts", @@ -2134,6 +2172,23 @@ ], "documentation":"

Lists task templates for the specified Amazon Connect instance.

" }, + "ListTrafficDistributionGroupUsers":{ + "name":"ListTrafficDistributionGroupUsers", + "http":{ + "method":"GET", + "requestUri":"/traffic-distribution-group/{TrafficDistributionGroupId}/user" + }, + "input":{"shape":"ListTrafficDistributionGroupUsersRequest"}, + "output":{"shape":"ListTrafficDistributionGroupUsersResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Lists traffic distribution group users.

" + }, "ListTrafficDistributionGroups":{ "name":"ListTrafficDistributionGroups", "http":{ @@ -3108,6 +3163,22 @@ ], "documentation":"

Updates the name and description of a quick connect. The request accepts the following data in JSON format. At least Name or Description must be provided.

" }, + "UpdateRoutingProfileAgentAvailabilityTimer":{ + "name":"UpdateRoutingProfileAgentAvailabilityTimer", + "http":{ + "method":"POST", + "requestUri":"/routing-profiles/{InstanceId}/{RoutingProfileId}/agent-availability-timer" + }, + "input":{"shape":"UpdateRoutingProfileAgentAvailabilityTimerRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

 Updates whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.

" + }, "UpdateRoutingProfileConcurrency":{ "name":"UpdateRoutingProfileConcurrency", "http":{ @@ -3240,7 +3311,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates the traffic distribution for a given traffic distribution group.

For more information about updating a traffic distribution group, see Update telephony traffic distribution across Amazon Web Services Regions in the Amazon Connect Administrator Guide.

" + "documentation":"

Updates the traffic distribution for a given traffic distribution group.

You can change the SignInConfig only for a default TrafficDistributionGroup. If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned.

For more information about updating a traffic distribution group, see Update telephony traffic distribution across Amazon Web Services Regions in the Amazon Connect Administrator Guide.

" }, "UpdateUserHierarchy":{ "name":"UpdateUserHierarchy", @@ -3444,6 +3515,24 @@ "type":"integer", "min":0 }, + "AgentAvailabilityTimer":{ + "type":"string", + "enum":[ + "TIME_SINCE_LAST_ACTIVITY", + "TIME_SINCE_LAST_INBOUND" + ] + }, + "AgentConfig":{ + "type":"structure", + "required":["Distributions"], + "members":{ + "Distributions":{ + "shape":"DistributionList", + "documentation":"

Information about traffic distributions.

" + } + }, + "documentation":"

The distribution of agents between the instance and its replica(s).

" + }, "AgentContactReference":{ "type":"structure", "members":{ @@ -3915,6 +4004,35 @@ } } }, + "AssociateTrafficDistributionGroupUserRequest":{ + "type":"structure", + "required":[ + "TrafficDistributionGroupId", + "UserId", + "InstanceId" + ], + "members":{ + "TrafficDistributionGroupId":{ + "shape":"TrafficDistributionGroupIdOrArn", + "documentation":"

The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.

", + "location":"uri", + "locationName":"TrafficDistributionGroupId" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The identifier of the user account. This can be the ID or the ARN of the user.

" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

" + } + } + }, + "AssociateTrafficDistributionGroupUserResponse":{ + "type":"structure", + "members":{ + } + }, "AssociationId":{ "type":"string", "max":100, @@ -5159,6 +5277,10 @@ "Tags":{ "shape":"TagMap", "documentation":"

The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + }, + "AgentAvailabilityTimer":{ + "shape":"AgentAvailabilityTimer", + "documentation":"

Whether agents with this routing profile will have their routing order calculated based on longest idle time or time since their last inbound contact.

" } } }, @@ -7196,6 +7318,39 @@ } } }, + "DisassociateTrafficDistributionGroupUserRequest":{ + "type":"structure", + "required":[ + "TrafficDistributionGroupId", + "UserId", + "InstanceId" + ], + "members":{ + "TrafficDistributionGroupId":{ + "shape":"TrafficDistributionGroupIdOrArn", + "documentation":"

The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.

", + "location":"uri", + "locationName":"TrafficDistributionGroupId" + }, + "UserId":{ + "shape":"UserId", + "documentation":"

The identifier for the user. This can be the ID or the ARN of the user.

", + "location":"querystring", + "locationName":"UserId" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "location":"querystring", + "locationName":"InstanceId" + } + } + }, + "DisassociateTrafficDistributionGroupUserResponse":{ + "type":"structure", + "members":{ + } + }, "DismissUserContactRequest":{ "type":"structure", "required":[ @@ -8393,7 +8548,7 @@ }, "UserId":{ "shape":"AgentResourceId", - "documentation":"

The identifier for the user.

" + "documentation":"

The identifier for the user. This can be the ID or the ARN of the user.

" } } }, @@ -8660,6 +8815,14 @@ "Arn":{ "shape":"TrafficDistributionGroupArn", "documentation":"

The Amazon Resource Name (ARN) of the traffic distribution group.

" + }, + "SignInConfig":{ + "shape":"SignInConfig", + "documentation":"

 The distribution that determines which Amazon Web Services Regions should be used to sign in agents to the instance and its replica(s).

" + }, + "AgentConfig":{ + "shape":"AgentConfig", + "documentation":"

The distribution of agents between the instance and its replica(s).

" } } }, @@ -10971,6 +11134,44 @@ } } }, + "ListTrafficDistributionGroupUsersRequest":{ + "type":"structure", + "required":["TrafficDistributionGroupId"], + "members":{ + "TrafficDistributionGroupId":{ + "shape":"TrafficDistributionGroupIdOrArn", + "documentation":"

The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.

", + "location":"uri", + "locationName":"TrafficDistributionGroupId" + }, + "MaxResults":{ + "shape":"MaxResult10", + "documentation":"

The maximum number of results to return per page.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListTrafficDistributionGroupUsersResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + }, + "TrafficDistributionGroupUserSummaryList":{ + "shape":"TrafficDistributionGroupUserSummaryList", + "documentation":"

A list of traffic distribution group users.

" + } + } + }, "ListTrafficDistributionGroupsRequest":{ "type":"structure", "members":{ @@ -11937,13 +12138,17 @@ "type":"string", "enum":[ "TOLL_FREE", - "DID" + "DID", + "UIFN", + "SHARED", + "THIRD_PARTY_TF", + "THIRD_PARTY_DID" ] }, "PhoneNumberTypes":{ "type":"list", "member":{"shape":"PhoneNumberType"}, - "max":2 + "max":6 }, "PhoneNumberWorkflowMessage":{ "type":"string", @@ -12851,6 +13056,10 @@ "NumberOfAssociatedUsers":{ "shape":"Long", "documentation":"

The number of associated users in routing profile.

" + }, + "AgentAvailabilityTimer":{ + "shape":"AgentAvailabilityTimer", + "documentation":"

Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.

" } }, "documentation":"

Contains information about a routing profile.

" @@ -13897,6 +14106,39 @@ "error":{"httpStatusCode":402}, "exception":true }, + "SignInConfig":{ + "type":"structure", + "required":["Distributions"], + "members":{ + "Distributions":{ + "shape":"SignInDistributionList", + "documentation":"

Information about traffic distributions.

" + } + }, + "documentation":"

 The distribution that determines which Amazon Web Services Regions should be used to sign in agents to the instance and its replica(s).

" + }, + "SignInDistribution":{ + "type":"structure", + "required":[ + "Region", + "Enabled" + ], + "members":{ + "Region":{ + "shape":"AwsRegion", + "documentation":"

The Amazon Web Services Region of the sign in distribution.

" + }, + "Enabled":{ + "shape":"Boolean", + "documentation":"

Whether sign in distribution is enabled.

" + } + }, + "documentation":"

The distribution of sign in traffic between the instance and its replica(s).

" + }, + "SignInDistributionList":{ + "type":"list", + "member":{"shape":"SignInDistribution"} + }, "SingleSelectOptions":{ "type":"list", "member":{"shape":"TaskTemplateSingleSelectOption"} @@ -14936,6 +15178,10 @@ "Tags":{ "shape":"TagMap", "documentation":"

The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + }, + "IsDefault":{ + "shape":"Boolean", + "documentation":"

Whether this is the default traffic distribution group created during instance replication. The default traffic distribution group cannot be deleted by the DeleteTrafficDistributionGroup API. The default traffic distribution group is deleted as part of the process for deleting a replica.

You can change the SignInConfig only for a default TrafficDistributionGroup. If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned.

" } }, "documentation":"

Information about a traffic distribution group.

" @@ -14985,6 +15231,10 @@ "Status":{ "shape":"TrafficDistributionGroupStatus", "documentation":"

The status of the traffic distribution group.

" + }, + "IsDefault":{ + "shape":"Boolean", + "documentation":"

Whether this is the default traffic distribution group created during instance replication. The default traffic distribution group cannot be deleted by the DeleteTrafficDistributionGroup API. The default traffic distribution group is deleted as part of the process for deleting a replica.

" } }, "documentation":"

Information about traffic distribution groups.

" @@ -14995,6 +15245,22 @@ "max":10, "min":0 }, + "TrafficDistributionGroupUserSummary":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"UserId", + "documentation":"

The identifier for the user. This can be the ID or the ARN of the user.

" + } + }, + "documentation":"

Summary information about a traffic distribution group user.

" + }, + "TrafficDistributionGroupUserSummaryList":{ + "type":"list", + "member":{"shape":"TrafficDistributionGroupUserSummary"}, + "max":10, + "min":0 + }, "TrafficType":{ "type":"string", "enum":[ @@ -15024,7 +15290,7 @@ }, "UserId":{ "shape":"AgentResourceId", - "documentation":"

The identifier for the user.

" + "documentation":"

The identifier for the user. This can be the ID or the ARN of the user.

" }, "ContactFlowId":{ "shape":"ContactFlowId", @@ -15932,6 +16198,32 @@ } } }, + "UpdateRoutingProfileAgentAvailabilityTimerRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "RoutingProfileId", + "AgentAvailabilityTimer" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "RoutingProfileId":{ + "shape":"RoutingProfileId", + "documentation":"

The identifier of the routing profile.

", + "location":"uri", + "locationName":"RoutingProfileId" + }, + "AgentAvailabilityTimer":{ + "shape":"AgentAvailabilityTimer", + "documentation":"

Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time.

" + } + } + }, "UpdateRoutingProfileConcurrencyRequest":{ "type":"structure", "required":[ @@ -16232,6 +16524,14 @@ "TelephonyConfig":{ "shape":"TelephonyConfig", "documentation":"

The distribution of traffic between the instance and its replica(s).

" + }, + "SignInConfig":{ + "shape":"SignInConfig", + "documentation":"

 The distribution that determines which Amazon Web Services Regions should be used to sign in agents to the instance and its replica(s).

" + }, + "AgentConfig":{ + "shape":"AgentConfig", + "documentation":"

The distribution of agents between the instance and its replica(s).

" } } }, diff --git a/services/connectcampaigns/pom.xml b/services/connectcampaigns/pom.xml index 855a9f52451..c90fe4a77fb 100644 --- a/services/connectcampaigns/pom.xml +++ b/services/connectcampaigns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT connectcampaigns AWS Java SDK :: Services :: Connect Campaigns diff --git a/services/connectcases/pom.xml b/services/connectcases/pom.xml index f0e71faeebf..afaa98d72c9 100644 --- a/services/connectcases/pom.xml +++ b/services/connectcases/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT connectcases AWS Java SDK :: Services :: Connect Cases diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index 4b0036ed1b6..bdb8ac1a52c 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index 1ac837ff8cf..0db6fa4b853 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/controltower/pom.xml b/services/controltower/pom.xml index 63c6f1bb89b..99f800c9bb0 100644 --- a/services/controltower/pom.xml +++ b/services/controltower/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT controltower AWS Java SDK :: Services :: Control Tower diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index f85f0639603..07a3642acc5 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index eaef2b3b544..d0e7f59dbd1 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 costexplorer diff --git a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json index 09cea2bc7d5..e4c99e99633 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,64 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - 
"error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "isSet", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws" + "ref": "Region" + } ] } ], @@ -128,22 +111,13 @@ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -151,556 +125,303 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://ce.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] + }, + "aws-cn" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": 
"https://ce.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "cn-northwest-1" } ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cost-explorer-fips.{Region}.api.aws", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "ce" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] } ] }, - "headers": {} - }, - "type": "endpoint" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - 
"url": "https://cost-explorer-fips.{Region}.amazonaws.com", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "ce" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] } ] - }, - "headers": {} - }, - "type": "endpoint" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cost-explorer.{Region}.api.aws", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "ce" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] } ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://ce.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - 
"signingRegion": "us-east-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cost-explorer-fips.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" + "conditions": [], + "endpoint": { + "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - 
"endpoint": { - "url": "https://cost-explorer-fips.{Region}.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [], - "endpoint": { - "url": "https://cost-explorer.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://ce.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - 
"supportsDualStack" - ] + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [], "type": "tree", @@ -708,7 +429,7 @@ { "conditions": [], "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -717,134 +438,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - 
"conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://ce.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - ], - "endpoint": { - "url": "https://ce.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "ce" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json b/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json index b733909b76d..26d783f3157 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,5 +1,88 @@ { "testCases": [ + { + "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://ce.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": 
true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://ce.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", "expect": { @@ -7,9 +90,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "cn-northwest-1", "name": "sigv4", - "signingName": "ce" + "signingName": "ce", + "signingRegion": "cn-northwest-1" } ] }, @@ -17,35 +100,236 @@ } }, "params": { + "Region": "aws-cn-global", "UseFIPS": false, - "UseDualStack": false, - "Region": "aws-cn-global" + "UseDualStack": false } }, { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": 
"cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "name": "sigv4", - "signingName": "ce" + "signingName": "ce", + "signingRegion": "cn-northwest-1" } ] }, - "url": "https://ce.us-east-1.amazonaws.com" + "url": "https://ce.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ce.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS 
enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 
with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ce.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "aws-global" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -54,7 +338,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -64,9 +347,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -76,11 +359,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/costexplorer/src/main/resources/codegen-resources/service-2.json b/services/costexplorer/src/main/resources/codegen-resources/service-2.json index 42715b57eeb..37fbde371ed 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/service-2.json +++ b/services/costexplorer/src/main/resources/codegen-resources/service-2.json @@ -296,6 +296,20 @@ ], "documentation":"

Creates recommendations that help you save cost by identifying idle and underutilized Amazon EC2 instances.

Recommendations are generated to either downsize or terminate instances, along with providing savings detail and metrics. For more information about calculation and function, see Optimizing Your Cost with Rightsizing Recommendations in the Billing and Cost Management User Guide.

" }, + "GetSavingsPlanPurchaseRecommendationDetails":{ + "name":"GetSavingsPlanPurchaseRecommendationDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSavingsPlanPurchaseRecommendationDetailsRequest"}, + "output":{"shape":"GetSavingsPlanPurchaseRecommendationDetailsResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"DataUnavailableException"} + ], + "documentation":"

Retrieves the details for a Savings Plan recommendation. These details include the hourly data-points that construct the new cost, coverage, and utilization charts.

" + }, "GetSavingsPlansCoverage":{ "name":"GetSavingsPlansCoverage", "http":{ @@ -423,7 +437,8 @@ "output":{"shape":"ListSavingsPlansPurchaseRecommendationGenerationResponse"}, "errors":[ {"shape":"LimitExceededException"}, - {"shape":"InvalidNextTokenException"} + {"shape":"InvalidNextTokenException"}, + {"shape":"DataUnavailableException"} ], "documentation":"

Retrieves a list of your historical recommendation generations within the past 30 days.

" }, @@ -465,7 +480,8 @@ "errors":[ {"shape":"LimitExceededException"}, {"shape":"ServiceQuotaExceededException"}, - {"shape":"GenerationExistsException"} + {"shape":"GenerationExistsException"}, + {"shape":"DataUnavailableException"} ], "documentation":"

Requests a Savings Plans recommendation generation. This enables you to calculate a fresh set of Savings Plans recommendations that takes your latest usage data and current Savings Plans inventory into account. You can refresh Savings Plans recommendations up to three times daily for a consolidated billing family.

StartSavingsPlansPurchaseRecommendationGeneration has no request syntax because no input parameters are needed to support this operation.

" }, @@ -525,7 +541,7 @@ {"shape":"UnknownMonitorException"}, {"shape":"UnknownSubscriptionException"} ], - "documentation":"

Updates an existing cost anomaly monitor subscription.

" + "documentation":"

Updates an existing cost anomaly subscription. Specify the fields that you want to update. Omitted fields are unchanged.

The JSON below describes the generic construct for each type. See Request Parameters for possible values as they apply to AnomalySubscription.

" }, "UpdateCostAllocationTagsStatus":{ "name":"UpdateCostAllocationTagsStatus", @@ -733,13 +749,13 @@ }, "Threshold":{ "shape":"NullableNonNegativeDouble", - "documentation":"

(deprecated)

The dollar value that triggers a notification if the threshold is exceeded.

This field has been deprecated. To specify a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.

One of Threshold or ThresholdExpression is required for this resource.

", + "documentation":"

(deprecated)

An absolute dollar value that must be exceeded by the anomaly's total impact (see Impact for more details) for an anomaly notification to be generated.

This field has been deprecated. To specify a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.

One of Threshold or ThresholdExpression is required for this resource. You cannot specify both.

", "deprecated":true, "deprecatedMessage":"Threshold has been deprecated in favor of ThresholdExpression" }, "Frequency":{ "shape":"AnomalySubscriptionFrequency", - "documentation":"

The frequency that anomaly reports are sent over email.

" + "documentation":"

The frequency that anomaly notifications are sent. Notifications are sent either over email (for DAILY and WEEKLY frequencies) or SNS (for IMMEDIATE frequency). For more information, see Creating an Amazon SNS topic for anomaly notifications.

" }, "SubscriptionName":{ "shape":"GenericString", @@ -747,10 +763,10 @@ }, "ThresholdExpression":{ "shape":"Expression", - "documentation":"

An Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000.

One of Threshold or ThresholdExpression is required for this resource.

The following are examples of valid ThresholdExpressions:

  • Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }

  • Percentage threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }

  • AND two thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }

  • OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }

" + "documentation":"

An Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE, corresponding to an anomaly’s TotalImpact and TotalImpactPercentage, respectively (see Impact for more details). The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000 in string format.

One of Threshold or ThresholdExpression is required for this resource. You cannot specify both.

The following are examples of valid ThresholdExpressions:

  • Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }

  • Percentage threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }

  • AND two thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }

  • OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }

" } }, - "documentation":"

The association between a monitor, threshold, and list of subscribers used to deliver notifications about anomalies detected by a monitor that exceeds a threshold. The content consists of the detailed metadata and the current status of the AnomalySubscription object.

" + "documentation":"

An AnomalySubscription resource (also referred to as an alert subscription) sends notifications about specific anomalies that meet an alerting criteria defined by you.

You can specify the frequency of the alerts and the subscribers to notify.

Anomaly subscriptions can be associated with one or more AnomalyMonitor resources, and they only send notifications about anomalies detected by those associated monitors. You can also configure a threshold to further control which anomalies are included in the notifications.

Anomalies that don’t exceed the chosen threshold and therefore don’t trigger notifications from an anomaly subscription will still be available on the console and from the GetAnomalies API.

" }, "AnomalySubscriptionFrequency":{ "type":"string", @@ -1008,7 +1024,7 @@ "Value":{"shape":"CostCategoryValue"}, "Rule":{ "shape":"Expression", - "documentation":"

An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, and LINKED_ACCOUNT_NAME.

Root level OR isn't supported. We recommend that you create a separate rule instead.

RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.

" + "documentation":"

An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, LINKED_ACCOUNT_NAME, REGION, and USAGE_TYPE.

RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.

" }, "InheritedValue":{ "shape":"CostCategoryInheritedValueDimension", @@ -1818,7 +1834,7 @@ "documentation":"

The filter that's based on CostCategory values.

" } }, - "documentation":"

Use Expression to filter in various Cost Explorer APIs.

Not all Expression types are supported in each API. Refer to the documentation for each specific API to see what is supported.

There are two patterns:

  • Simple dimension values.

    • There are three types of simple dimension values: CostCategories, Tags, and Dimensions.

      • Specify the CostCategories field to define a filter that acts on Cost Categories.

      • Specify the Tags field to define a filter that acts on Cost Allocation Tags.

      • Specify the Dimensions field to define a filter that acts on the DimensionValues .

    • For each filter type, you can set the dimension name and values for the filters that you plan to use.

      • For example, you can filter for REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name (for example, REGION==US East (N. Virginia).

      • The corresponding Expression for this example is as follows: { \"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", “us-west-1” ] } }

      • As shown in the previous example, lists of dimension values are combined with OR when applying the filter.

    • You can also set different match options to further control how the filter behaves. Not all APIs support match options. Refer to the documentation for each specific API to see what is supported.

      • For example, you can filter for linked account names that start with “a”.

      • The corresponding Expression for this example is as follows: { \"Dimensions\": { \"Key\": \"LINKED_ACCOUNT_NAME\", \"MatchOptions\": [ \"STARTS_WITH\" ], \"Values\": [ \"a\" ] } }

  • Compound Expression types with logical operations.

    • You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. By doing this, you can filter by more advanced options.

    • For example, you can filter by ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer).

    • The corresponding Expression for this example is as follows: { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", \"us-west-1\" ] }}, {\"Tags\": { \"Key\": \"TagName\", \"Values\": [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [\"DataTransfer\"] }}} ] }

    Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error: { \"And\": [ ... ], \"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [ \"DataTransfer\" ] } }

    The following is an example of the corresponding error message: \"Expression has more than one roots. Only one root operator is allowed for each expression: And, Or, Not, Dimensions, Tags, CostCategories\"

For the GetRightsizingRecommendation action, a combination of OR and NOT isn't supported. OR isn't supported between different dimensions, or dimensions and tags. NOT operators aren't supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE.

For the GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR aren't supported. Dimensions are limited to LINKED_ACCOUNT.

" + "documentation":"

Use Expression to filter in various Cost Explorer APIs.

Not all Expression types are supported in each API. Refer to the documentation for each specific API to see what is supported.

There are two patterns:

  • Simple dimension values.

    • There are three types of simple dimension values: CostCategories, Tags, and Dimensions.

      • Specify the CostCategories field to define a filter that acts on Cost Categories.

      • Specify the Tags field to define a filter that acts on Cost Allocation Tags.

      • Specify the Dimensions field to define a filter that acts on the DimensionValues .

    • For each filter type, you can set the dimension name and values for the filters that you plan to use.

      • For example, you can filter for REGION==us-east-1 OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name (for example, REGION==US East (N. Virginia).

      • The corresponding Expression for this example is as follows: { \"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", \"us-west-1\" ] } }

      • As shown in the previous example, lists of dimension values are combined with OR when applying the filter.

    • You can also set different match options to further control how the filter behaves. Not all APIs support match options. Refer to the documentation for each specific API to see what is supported.

      • For example, you can filter for linked account names that start with \"a\".

      • The corresponding Expression for this example is as follows: { \"Dimensions\": { \"Key\": \"LINKED_ACCOUNT_NAME\", \"MatchOptions\": [ \"STARTS_WITH\" ], \"Values\": [ \"a\" ] } }

  • Compound Expression types with logical operations.

    • You can use multiple Expression types and the logical operators AND/OR/NOT to create a list of one or more Expression objects. By doing this, you can filter by more advanced options.

    • For example, you can filter by ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != DataTransfer).

    • The corresponding Expression for this example is as follows: { \"And\": [ {\"Or\": [ {\"Dimensions\": { \"Key\": \"REGION\", \"Values\": [ \"us-east-1\", \"us-west-1\" ] }}, {\"Tags\": { \"Key\": \"TagName\", \"Values\": [\"Value1\"] } } ]}, {\"Not\": {\"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [\"DataTransfer\"] }}} ] }

    Because each Expression can have only one operator, the service returns an error if more than one is specified. The following example shows an Expression object that creates an error: { \"And\": [ ... ], \"Dimensions\": { \"Key\": \"USAGE_TYPE\", \"Values\": [ \"DataTransfer\" ] } }

    The following is an example of the corresponding error message: \"Expression has more than one roots. Only one root operator is allowed for each expression: And, Or, Not, Dimensions, Tags, CostCategories\"

For the GetRightsizingRecommendation action, a combination of OR and NOT isn't supported. OR isn't supported between different dimensions, or dimensions and tags. NOT operators aren't supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE.

For the GetReservationPurchaseRecommendation action, only NOT is supported. AND and OR aren't supported. Dimensions are limited to LINKED_ACCOUNT.

" }, "Expressions":{ "type":"list", @@ -2529,6 +2545,29 @@ } } }, + "GetSavingsPlanPurchaseRecommendationDetailsRequest":{ + "type":"structure", + "required":["RecommendationDetailId"], + "members":{ + "RecommendationDetailId":{ + "shape":"RecommendationDetailId", + "documentation":"

The ID that is associated with the Savings Plan recommendation.

" + } + } + }, + "GetSavingsPlanPurchaseRecommendationDetailsResponse":{ + "type":"structure", + "members":{ + "RecommendationDetailId":{ + "shape":"RecommendationDetailId", + "documentation":"

The ID that is associated with the Savings Plan recommendation.

" + }, + "RecommendationDetailData":{ + "shape":"RecommendationDetailData", + "documentation":"

Contains detailed information about a specific Savings Plan recommendation.

" + } + } + }, "GetSavingsPlansCoverageRequest":{ "type":"structure", "required":["TimePeriod"], @@ -3158,6 +3197,10 @@ "key":{"shape":"MetricName"}, "value":{"shape":"MetricValue"} }, + "MetricsOverLookbackPeriod":{ + "type":"list", + "member":{"shape":"RecommendationDetailHourlyMetrics"} + }, "ModifyRecommendationDetail":{ "type":"structure", "members":{ @@ -3346,6 +3389,151 @@ }, "RICostForUnusedHours":{"type":"string"}, "RealizedSavings":{"type":"string"}, + "RecommendationDetailData":{ + "type":"structure", + "members":{ + "AccountScope":{ + "shape":"AccountScope", + "documentation":"

The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the management account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.

" + }, + "LookbackPeriodInDays":{ + "shape":"LookbackPeriodInDays", + "documentation":"

How many days of previous usage that Amazon Web Services considers when making this recommendation.

" + }, + "SavingsPlansType":{ + "shape":"SupportedSavingsPlansType", + "documentation":"

The requested Savings Plan recommendation type.

" + }, + "TermInYears":{ + "shape":"TermInYears", + "documentation":"

The term of the commitment in years.

" + }, + "PaymentOption":{ + "shape":"PaymentOption", + "documentation":"

The payment option for the commitment (for example, All Upfront or No Upfront).

" + }, + "AccountId":{ + "shape":"GenericString", + "documentation":"

The AccountID that the recommendation is generated for.

" + }, + "CurrencyCode":{ + "shape":"GenericString", + "documentation":"

The currency code that Amazon Web Services used to generate the recommendation and present potential savings.

" + }, + "InstanceFamily":{ + "shape":"GenericString", + "documentation":"

The instance family of the recommended Savings Plan.

" + }, + "Region":{ + "shape":"GenericString", + "documentation":"

The region the recommendation is generated for.

" + }, + "OfferingId":{ + "shape":"GenericString", + "documentation":"

The unique ID that's used to distinguish Savings Plans from one another.

" + }, + "GenerationTimestamp":{"shape":"ZonedDateTime"}, + "LatestUsageTimestamp":{"shape":"ZonedDateTime"}, + "CurrentAverageHourlyOnDemandSpend":{ + "shape":"GenericString", + "documentation":"

The average value of hourly On-Demand spend over the lookback period of the applicable usage type.

" + }, + "CurrentMaximumHourlyOnDemandSpend":{ + "shape":"GenericString", + "documentation":"

The highest value of hourly On-Demand spend over the lookback period of the applicable usage type.

" + }, + "CurrentMinimumHourlyOnDemandSpend":{ + "shape":"GenericString", + "documentation":"

The lowest value of hourly On-Demand spend over the lookback period of the applicable usage type.

" + }, + "EstimatedAverageUtilization":{ + "shape":"GenericString", + "documentation":"

The estimated utilization of the recommended Savings Plan.

" + }, + "EstimatedMonthlySavingsAmount":{ + "shape":"GenericString", + "documentation":"

The estimated monthly savings amount based on the recommended Savings Plan.

" + }, + "EstimatedOnDemandCost":{ + "shape":"GenericString", + "documentation":"

The remaining On-Demand cost estimated to not be covered by the recommended Savings Plan, over the length of the lookback period.

" + }, + "EstimatedOnDemandCostWithCurrentCommitment":{ + "shape":"GenericString", + "documentation":"

The estimated On-Demand costs you expect with no additional commitment, based on your usage of the selected time period and the Savings Plan you own.

" + }, + "EstimatedROI":{ + "shape":"GenericString", + "documentation":"

The estimated return on investment that's based on the recommended Savings Plan that you purchased. This is calculated as estimatedSavingsAmount/estimatedSPCost*100.

" + }, + "EstimatedSPCost":{ + "shape":"GenericString", + "documentation":"

The cost of the recommended Savings Plan over the length of the lookback period.

" + }, + "EstimatedSavingsAmount":{ + "shape":"GenericString", + "documentation":"

The estimated savings amount that's based on the recommended Savings Plan over the length of the lookback period.

" + }, + "EstimatedSavingsPercentage":{ + "shape":"GenericString", + "documentation":"

The estimated savings percentage relative to the total cost of applicable On-Demand usage over the lookback period.

" + }, + "ExistingHourlyCommitment":{ + "shape":"GenericString", + "documentation":"

The existing hourly commitment for the Savings Plan type.

" + }, + "HourlyCommitmentToPurchase":{ + "shape":"GenericString", + "documentation":"

The recommended hourly commitment level for the Savings Plan type and the configuration that's based on the usage during the lookback period.

" + }, + "UpfrontCost":{ + "shape":"GenericString", + "documentation":"

The upfront cost of the recommended Savings Plan, based on the selected payment option.

" + }, + "CurrentAverageCoverage":{ + "shape":"GenericString", + "documentation":"

The average value of hourly coverage over the lookback period.

" + }, + "EstimatedAverageCoverage":{ + "shape":"GenericString", + "documentation":"

The estimated coverage of the recommended Savings Plan.

" + }, + "MetricsOverLookbackPeriod":{ + "shape":"MetricsOverLookbackPeriod", + "documentation":"

The related hourly cost, coverage, and utilization metrics over the lookback period.

" + } + }, + "documentation":"

The details and metrics for the given recommendation.

" + }, + "RecommendationDetailHourlyMetrics":{ + "type":"structure", + "members":{ + "StartTime":{"shape":"ZonedDateTime"}, + "EstimatedOnDemandCost":{ + "shape":"GenericString", + "documentation":"

The remaining On-Demand cost estimated to not be covered by the recommended Savings Plan, over the length of the lookback period.

" + }, + "CurrentCoverage":{ + "shape":"GenericString", + "documentation":"

The current amount of Savings Plans eligible usage that the Savings Plan covered.

" + }, + "EstimatedCoverage":{ + "shape":"GenericString", + "documentation":"

The estimated coverage amount based on the recommended Savings Plan.

" + }, + "EstimatedNewCommitmentUtilization":{ + "shape":"GenericString", + "documentation":"

The estimated utilization for the recommended Savings Plan.

" + } + }, + "documentation":"

Contains the hourly metrics for the given recommendation over the lookback period.

" + }, + "RecommendationDetailId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[\\S\\s]{8}-[\\S\\s]{4}-[\\S\\s]{4}-[\\S\\s]{4}-[\\S\\s]{12}$" + }, "RecommendationId":{ "type":"string", "max":36, @@ -4096,6 +4284,10 @@ "CurrentAverageHourlyOnDemandSpend":{ "shape":"GenericString", "documentation":"

The average value of hourly On-Demand spend over the lookback period of the applicable usage type.

" + }, + "RecommendationDetailId":{ + "shape":"RecommendationDetailId", + "documentation":"

The unique identifier of the Savings Plan recommendation detail. Use this ID with GetSavingsPlanPurchaseRecommendationDetails to retrieve detailed information about the recommendation.

" } }, "documentation":"

Details for your recommended Savings Plans.

" @@ -4645,7 +4837,7 @@ }, "Threshold":{ "shape":"NullableNonNegativeDouble", - "documentation":"

(deprecated)

The update to the threshold value for receiving notifications.

This field has been deprecated. To update a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.

", + "documentation":"

(deprecated)

The update to the threshold value for receiving notifications.

This field has been deprecated. To update a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.

You can specify either Threshold or ThresholdExpression, but not both.

", "deprecated":true, "deprecatedMessage":"Threshold has been deprecated in favor of ThresholdExpression" }, @@ -4667,7 +4859,7 @@ }, "ThresholdExpression":{ "shape":"Expression", - "documentation":"

The update to the Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000.

The following are examples of valid ThresholdExpressions:

  • Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }

  • Percentage threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }

  • AND two thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }

  • OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }

" + "documentation":"

The update to the Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE, corresponding to an anomaly’s TotalImpact and TotalImpactPercentage, respectively (see Impact for more details). The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000 in string format.

You can specify either Threshold or ThresholdExpression, but not both.

The following are examples of valid ThresholdExpressions:

  • Absolute threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }

  • Percentage threshold: { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }

  • AND two thresholds together: { \"And\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }

  • OR two thresholds together: { \"Or\": [ { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_ABSOLUTE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } }, { \"Dimensions\": { \"Key\": \"ANOMALY_TOTAL_IMPACT_PERCENTAGE\", \"MatchOptions\": [ \"GREATER_THAN_OR_EQUAL\" ], \"Values\": [ \"100\" ] } } ] }

" } } }, diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index 3a801c52cf3..3c0eef5654b 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json index dcd2477861c..2faa269c432 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json @@ -62,7 +62,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

Use this API or UpdateDomain to enable identity resolution: set Matching to true.

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

" + "documentation":"

Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

Use this API or UpdateDomain to enable identity resolution: set Matching to true.

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

" }, "CreateEventStream":{ "name":"CreateEventStream", @@ -439,6 +439,23 @@ ], "documentation":"

Returns the template information for a specific object type.

A template is a predefined ProfileObjectType, such as “Salesforce-Account” or “Salesforce-Contact.” When a user sends a ProfileObject, using the PutProfileObject API, with an ObjectTypeName that matches one of the TemplateIds, it uses the mappings from the template.

" }, + "GetSimilarProfiles":{ + "name":"GetSimilarProfiles", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/matches" + }, + "input":{"shape":"GetSimilarProfilesRequest"}, + "output":{"shape":"GetSimilarProfilesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a set of profiles that belong to the same matching group using the matchId or profileId. You can also specify the type of matching that you want for finding similar profiles using either RULE_BASED_MATCHING or ML_BASED_MATCHING.

" + }, "GetWorkflow":{ "name":"GetWorkflow", "http":{ @@ -643,6 +660,23 @@ ], "documentation":"

Returns a list of objects associated with a profile of a given ProfileObjectType.

" }, + "ListRuleBasedMatches":{ + "name":"ListRuleBasedMatches", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/profiles/ruleBasedMatches" + }, + "input":{"shape":"ListRuleBasedMatchesRequest"}, + "output":{"shape":"ListRuleBasedMatchesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a set of MatchIds that belong to the given domain.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -821,7 +855,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.

After a domain is created, the name can’t be changed.

Use this API or CreateDomain to enable identity resolution: set Matching to true.

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

To add or remove tags on an existing Domain, see TagResource/UntagResource.

" + "documentation":"

Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.

After a domain is created, the name can’t be changed.

Use this API or CreateDomain to enable identity resolution: set Matching to true.

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

To add or remove tags on an existing Domain, see TagResource/UntagResource.

" }, "UpdateProfile":{ "name":"UpdateProfile", @@ -963,6 +997,12 @@ }, "documentation":"

A generic address associated with the customer that is not mailing, shipping, or billing.

" }, + "AddressList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":4, + "min":1 + }, "AppflowIntegration":{ "type":"structure", "required":["FlowDefinition"], @@ -1103,11 +1143,41 @@ "max":2, "min":1 }, + "AttributeMatchingModel":{ + "type":"string", + "enum":[ + "ONE_TO_ONE", + "MANY_TO_MANY" + ] + }, "AttributeSourceIdMap":{ "type":"map", "key":{"shape":"string1To255"}, "value":{"shape":"uuid"} }, + "AttributeTypesSelector":{ + "type":"structure", + "required":["AttributeMatchingModel"], + "members":{ + "AttributeMatchingModel":{ + "shape":"AttributeMatchingModel", + "documentation":"

Configures the AttributeMatchingModel, you can either choose ONE_TO_ONE or MANY_TO_MANY.

" + }, + "Address":{ + "shape":"AddressList", + "documentation":"

The Address type. You can choose from Address, BusinessAddress, MaillingAddress, and ShippingAddress.

You only can use the Address type in the MatchingRule. For example, if you want to match profile based on BusinessAddress.City or MaillingAddress.City, you need to choose the BusinessAddress and the MaillingAddress to represent the Address type and specify the Address.City on the matching rule.

" + }, + "PhoneNumber":{ + "shape":"PhoneNumberList", + "documentation":"

The PhoneNumber type. You can choose from PhoneNumber, HomePhoneNumber, and MobilePhoneNumber.

You only can use the PhoneNumber type in the MatchingRule. For example, if you want to match a profile based on Phone or HomePhone, you need to choose the Phone and the HomePhone to represent the PhoneNumber type and only specify the PhoneNumber on the matching rule.

" + }, + "EmailAddress":{ + "shape":"EmailList", + "documentation":"

The Email type. You can choose from EmailAddress, BusinessEmailAddress and PersonalEmailAddress.

You only can use the EmailAddress type in the MatchingRule. For example, if you want to match profile based on PersonalEmailAddress or BusinessEmailAddress, you need to choose the PersonalEmailAddress and the BusinessEmailAddress to represent the EmailAddress type and only specify the EmailAddress on the matching rule.

" + } + }, + "documentation":"

Configuration information about the AttributeTypesSelector where the rule-based identity resolution uses to match profiles. You can choose how profiles are compared across attribute types and which attribute to use for matching from each type. There are three attribute types you can configure:

  • Email type

    • You can choose from Email, BusinessEmail, and PersonalEmail

  • Phone number type

    • You can choose from Phone, HomePhone, and MobilePhone

  • Address type

    • You can choose from Address, BusinessAddress, MaillingAddress, and ShippingAddress

You can either choose ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel. When choosing MANY_TO_MANY, the system can match attributes across the sub-types of an attribute type. For example, if the value of the Email field of Profile A and the value of BusinessEmail field of Profile B matches, the two profiles are matched on the Email type. When choosing ONE_TO_ONE, the system can only match if the sub-types are exact matches. For example, only when the value of the Email field of Profile A and the value of the Email field of Profile B matches, the two profiles are matched on the Email type.

" + }, "Attributes":{ "type":"map", "key":{"shape":"string1To255"}, @@ -1385,6 +1455,10 @@ "shape":"MatchingRequest", "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, + "RuleBasedMatching":{ + "shape":"RuleBasedMatchingRequest", + "documentation":"

The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

The tags used to organize, track, or control access for this resource.

" @@ -1420,6 +1494,10 @@ "shape":"MatchingResponse", "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, + "RuleBasedMatching":{ + "shape":"RuleBasedMatchingResponse", + "documentation":"

The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.

" + }, "CreatedAt":{ "shape":"timestamp", "documentation":"

The timestamp of when the domain was created.

" @@ -1983,6 +2061,12 @@ "max":1.0, "min":0.0 }, + "EmailList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":3, + "min":1 + }, "EventStreamDestinationDetails":{ "type":"structure", "required":[ @@ -2471,6 +2555,10 @@ "shape":"MatchingResponse", "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, + "RuleBasedMatching":{ + "shape":"RuleBasedMatchingResponse", + "documentation":"

The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.

" + }, "CreatedAt":{ "shape":"timestamp", "documentation":"

The timestamp of when the domain was created.

" @@ -2850,6 +2938,76 @@ } } }, + "GetSimilarProfilesRequest":{ + "type":"structure", + "required":[ + "DomainName", + "MatchType", + "SearchKey", + "SearchValue" + ], + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous GetSimilarProfiles API call.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "MatchType":{ + "shape":"MatchType", + "documentation":"

Specify the type of matching to get similar profiles for.

" + }, + "SearchKey":{ + "shape":"string1To255", + "documentation":"

The string indicating the search key to be used.

" + }, + "SearchValue":{ + "shape":"string1To255", + "documentation":"

The string based on SearchKey to be searched for similar profiles.

" + } + } + }, + "GetSimilarProfilesResponse":{ + "type":"structure", + "members":{ + "ProfileIds":{ + "shape":"ProfileIdList", + "documentation":"

Set of profileIds that belong to the same matching group.

" + }, + "MatchId":{ + "shape":"string1To255", + "documentation":"

The string matchId that the similar profiles belong to.

" + }, + "MatchType":{ + "shape":"MatchType", + "documentation":"

Specify the type of matching to get similar profiles for.

" + }, + "RuleLevel":{ + "shape":"RuleLevel", + "documentation":"

The integer rule level that the profiles matched on.

" + }, + "ConfidenceScore":{ + "shape":"Double", + "documentation":"

It only has value when the MatchType is ML_BASED_MATCHING. A number between 0 and 1, where a higher score means higher similarity. Examining match confidence scores lets you distinguish between groups of similar records in which the system is highly confident (which you may decide to merge), groups of similar records about which the system is uncertain (which you may decide to have reviewed by a human), and groups of similar records that the system deems to be unlikely (which you may decide to reject). Given confidence scores vary as per the data input, it should not be used as an absolute measure of matching quality.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous GetSimilarProfiles API call.

" + } + } + }, "GetWorkflowRequest":{ "type":"structure", "required":[ @@ -3699,6 +3857,43 @@ } } }, + "ListRuleBasedMatchesRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListRuleBasedMatches API call.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of MatchIds returned per page.

", + "location":"querystring", + "locationName":"max-results" + }, + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + } + } + }, + "ListRuleBasedMatchesResponse":{ + "type":"structure", + "members":{ + "MatchIds":{ + "shape":"MatchIdList", + "documentation":"

The list of MatchIds for the given domain.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The pagination token from the previous ListRuleBasedMatches API call.

" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -3843,6 +4038,10 @@ }, "documentation":"

The properties that are applied when Marketo is being used as a source.

" }, + "MatchIdList":{ + "type":"list", + "member":{"shape":"string1To255"} + }, "MatchItem":{ "type":"structure", "members":{ @@ -3861,6 +4060,13 @@ }, "documentation":"

The Match group object.

" }, + "MatchType":{ + "type":"string", + "enum":[ + "RULE_BASED_MATCHING", + "ML_BASED_MATCHING" + ] + }, "MatchesList":{ "type":"list", "member":{"shape":"MatchItem"} @@ -3922,6 +4128,39 @@ }, "documentation":"

The flag that enables the matching process of duplicate profiles.

" }, + "MatchingRule":{ + "type":"structure", + "required":["Rule"], + "members":{ + "Rule":{ + "shape":"MatchingRuleAttributeList", + "documentation":"

A single rule level of the MatchRules. Configures how the rule-based matching process should match profiles.

" + } + }, + "documentation":"

Specifies how the rule-based matching process should match profiles. You can choose from the following attributes to build the matching Rule:

  • AccountNumber

  • Address.Address

  • Address.City

  • Address.Country

  • Address.County

  • Address.PostalCode

  • Address.State

  • Address.Province

  • BirthDate

  • BusinessName

  • EmailAddress

  • FirstName

  • Gender

  • LastName

  • MiddleName

  • PhoneNumber

  • Any customized profile attributes that start with the Attributes

" + }, + "MatchingRuleAttributeList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":15, + "min":1 + }, + "MatchingRules":{ + "type":"list", + "member":{"shape":"MatchingRule"}, + "max":15, + "min":1 + }, + "MaxAllowedRuleLevelForMatching":{ + "type":"integer", + "max":15, + "min":1 + }, + "MaxAllowedRuleLevelForMerging":{ + "type":"integer", + "max":15, + "min":1 + }, "MergeProfilesRequest":{ "type":"structure", "required":[ @@ -4065,6 +4304,12 @@ "OTHER" ] }, + "PhoneNumberList":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":4, + "min":1 + }, "Profile":{ "type":"structure", "members":{ @@ -4461,6 +4706,80 @@ "max":512, "pattern":"arn:aws:iam:.*:[0-9]+:.*" }, + "RuleBasedMatchingRequest":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"optionalBoolean", + "documentation":"

The flag that enables the rule-based matching process of duplicate profiles.

" + }, + "MatchingRules":{ + "shape":"MatchingRules", + "documentation":"

Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule in the MatchingRules.

" + }, + "MaxAllowedRuleLevelForMerging":{ + "shape":"MaxAllowedRuleLevelForMerging", + "documentation":"

Indicates the maximum allowed rule level for merging.

" + }, + "MaxAllowedRuleLevelForMatching":{ + "shape":"MaxAllowedRuleLevelForMatching", + "documentation":"

Indicates the maximum allowed rule level.

" + }, + "AttributeTypesSelector":{ + "shape":"AttributeTypesSelector", + "documentation":"

Configures information about the AttributeTypesSelector where the rule-based identity resolution uses to match profiles.

" + }, + "ConflictResolution":{"shape":"ConflictResolution"}, + "ExportingConfig":{"shape":"ExportingConfig"} + }, + "documentation":"

The request to enable the rule-based matching.

" + }, + "RuleBasedMatchingResponse":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"optionalBoolean", + "documentation":"

The flag that enables the rule-based matching process of duplicate profiles.

" + }, + "MatchingRules":{ + "shape":"MatchingRules", + "documentation":"

Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule in the MatchingRules.

" + }, + "Status":{ + "shape":"RuleBasedMatchingStatus", + "documentation":"

PENDING

  • The first status after configuring a rule-based matching rule. If it is an existing domain, the rule-based Identity Resolution waits one hour before creating the matching rule. If it is a new domain, the system will skip the PENDING stage.

IN_PROGRESS

  • The system is creating the rule-based matching rule. Under this status, the system is evaluating the existing data and you can no longer change the Rule-based matching configuration.

ACTIVE

  • The rule is ready to use. You can change the rule a day after the status is in ACTIVE.

" + }, + "MaxAllowedRuleLevelForMerging":{ + "shape":"MaxAllowedRuleLevelForMerging", + "documentation":"

Indicates the maximum allowed rule level for merging.

" + }, + "MaxAllowedRuleLevelForMatching":{ + "shape":"MaxAllowedRuleLevelForMatching", + "documentation":"

Indicates the maximum allowed rule level.

" + }, + "AttributeTypesSelector":{ + "shape":"AttributeTypesSelector", + "documentation":"

Configures information about the AttributeTypesSelector where the rule-based identity resolution uses to match profiles.

" + }, + "ConflictResolution":{"shape":"ConflictResolution"}, + "ExportingConfig":{"shape":"ExportingConfig"} + }, + "documentation":"

The response of the Rule-based matching request.

" + }, + "RuleBasedMatchingStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "ACTIVE" + ] + }, + "RuleLevel":{ + "type":"integer", + "max":15, + "min":1 + }, "S3ConnectorOperator":{ "type":"string", "enum":[ @@ -5172,6 +5491,10 @@ "shape":"MatchingRequest", "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, + "RuleBasedMatching":{ + "shape":"RuleBasedMatchingRequest", + "documentation":"

The process of matching duplicate profiles using the rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.

" + }, "Tags":{ "shape":"TagMap", "documentation":"

The tags used to organize, track, or control access for this resource.

" @@ -5206,6 +5529,10 @@ "shape":"MatchingResponse", "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, + "RuleBasedMatching":{ + "shape":"RuleBasedMatchingResponse", + "documentation":"

The process of matching duplicate profiles using the rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3.

" + }, "CreatedAt":{ "shape":"timestamp", "documentation":"

The timestamp of when the domain was created.

" diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 79a68c0923a..69434417dbf 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databasemigration/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/databasemigration/src/main/resources/codegen-resources/endpoint-rule-set.json index c1910361de3..0ee72506b09 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/databasemigration/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": 
"Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://dms-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://dms-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + 
"supportsFIPS" + ] + } ] } ], @@ -221,205 +225,165 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://dms.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://dms.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-iso", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://dms.{Region}.c2s.ic.gov", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + "fn": "stringEquals", + "argv": [ + "aws-iso", { - "conditions": [ + "fn": "getAttr", + "argv": [ { - "fn": "stringEquals", - "argv": [ - "aws-iso-b", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://dms.{Region}.sc2s.sgov.gov", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://dms-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "ref": "PartitionResult" + }, + "name" + ] } ] } - ] + ], + "endpoint": { + "url": "https://dms.{Region}.c2s.ic.gov", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": 
"booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-iso-b", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://dms.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://dms.{Region}.sc2s.sgov.gov", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://dms-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://dms.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://dms.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://dms.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - 
}, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json b/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json index b9b289acd54..9a203275228 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json +++ b/services/databasemigration/src/main/resources/codegen-resources/paginators-1.json @@ -15,6 +15,11 @@ "output_token": "Marker", "limit_key": "MaxRecords" }, + "DescribeDataProviders": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeEndpointSettings": { "input_token": "Marker", "output_token": "Marker", @@ -30,6 +35,11 @@ "output_token": "Marker", "limit_key": "MaxRecords" }, + "DescribeEngineVersions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeEventSubscriptions": { "input_token": "Marker", "output_token": "Marker", @@ -40,6 +50,11 @@ "output_token": "Marker", "limit_key": "MaxRecords" }, + "DescribeExtensionPackAssociations": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeFleetAdvisorCollectors": { "input_token": "NextToken", "output_token": "NextToken", @@ -65,6 +80,41 @@ "output_token": "NextToken", "limit_key": "MaxRecords" }, + "DescribeInstanceProfiles": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelAssessments": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelConversions": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelExportsAsScript": { + "input_token": "Marker", + 
"output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelExportsToTarget": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMetadataModelImports": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeMigrationProjects": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeOrderableReplicationInstances": { "input_token": "Marker", "output_token": "Marker", diff --git a/services/databasemigration/src/main/resources/codegen-resources/service-2.json b/services/databasemigration/src/main/resources/codegen-resources/service-2.json index 7a32af8d2f6..3250f07993d 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/service-2.json +++ b/services/databasemigration/src/main/resources/codegen-resources/service-2.json @@ -68,6 +68,21 @@ ], "documentation":"

Cancels a single premigration assessment run.

This operation prevents any individual assessments from running if they haven't started running. It also attempts to cancel any individual assessments that are currently running.

" }, + "CreateDataProvider":{ + "name":"CreateDataProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataProviderMessage"}, + "output":{"shape":"CreateDataProviderResponse"}, + "errors":[ + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceAlreadyExistsFault"} + ], + "documentation":"

Creates a data provider using the provided settings. A data provider stores a data store type and location information about your database.

" + }, "CreateEndpoint":{ "name":"CreateEndpoint", "http":{ @@ -126,6 +141,44 @@ ], "documentation":"

Creates a Fleet Advisor collector using the specified parameters.

" }, + "CreateInstanceProfile":{ + "name":"CreateInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInstanceProfileMessage"}, + "output":{"shape":"CreateInstanceProfileResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"

Creates the instance profile using the specified parameters.

" + }, + "CreateMigrationProject":{ + "name":"CreateMigrationProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMigrationProjectMessage"}, + "output":{"shape":"CreateMigrationProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"

Creates the migration project using the specified parameters.

You can run this action only after you create an instance profile and data providers using CreateInstanceProfile and CreateDataProvider.

" + }, "CreateReplicationConfig":{ "name":"CreateReplicationConfig", "http":{ @@ -233,6 +286,21 @@ ], "documentation":"

Deletes the connection between a replication instance and an endpoint.

" }, + "DeleteDataProvider":{ + "name":"DeleteDataProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDataProviderMessage"}, + "output":{"shape":"DeleteDataProviderResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"

Deletes the specified data provider.

All migration projects associated with the data provider must be deleted or modified before you can delete the data provider.

" + }, "DeleteEndpoint":{ "name":"DeleteEndpoint", "http":{ @@ -288,6 +356,36 @@ ], "documentation":"

Deletes the specified Fleet Advisor collector databases.

" }, + "DeleteInstanceProfile":{ + "name":"DeleteInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInstanceProfileMessage"}, + "output":{"shape":"DeleteInstanceProfileResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"

Deletes the specified instance profile.

All migration projects associated with the instance profile must be deleted or modified before you can delete the instance profile.

" + }, + "DeleteMigrationProject":{ + "name":"DeleteMigrationProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMigrationProjectMessage"}, + "output":{"shape":"DeleteMigrationProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"

Deletes the specified migration project.

The migration project must be closed before you can delete it.

" + }, "DeleteReplicationConfig":{ "name":"DeleteReplicationConfig", "http":{ @@ -411,6 +509,33 @@ ], "documentation":"

Describes the status of the connections that have been made between the replication instance and an endpoint. Connections are created when you test an endpoint.

" }, + "DescribeConversionConfiguration":{ + "name":"DescribeConversionConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConversionConfigurationMessage"}, + "output":{"shape":"DescribeConversionConfigurationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Returns configuration parameters for a schema conversion project.

" + }, + "DescribeDataProviders":{ + "name":"DescribeDataProviders", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataProvidersMessage"}, + "output":{"shape":"DescribeDataProvidersResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"

Returns a paginated list of data providers for your account in the current region.

" + }, "DescribeEndpointSettings":{ "name":"DescribeEndpointSettings", "http":{ @@ -444,6 +569,16 @@ ], "documentation":"

Returns information about the endpoints for your account in the current region.

" }, + "DescribeEngineVersions":{ + "name":"DescribeEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEngineVersionsMessage"}, + "output":{"shape":"DescribeEngineVersionsResponse"}, + "documentation":"

Returns information about the replication instance versions used in the project.

" + }, "DescribeEventCategories":{ "name":"DescribeEventCategories", "http":{ @@ -477,6 +612,16 @@ "output":{"shape":"DescribeEventsResponse"}, "documentation":"

Lists events for a given source identifier and source type. You can also specify a start and end time. For more information on DMS events, see Working with Events and Notifications in the Database Migration Service User Guide.

" }, + "DescribeExtensionPackAssociations":{ + "name":"DescribeExtensionPackAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeExtensionPackAssociationsMessage"}, + "output":{"shape":"DescribeExtensionPackAssociationsResponse"}, + "documentation":"

Returns a paginated list of extension pack associations for the specified migration project. An extension pack is an add-on module that emulates functions present in a source database that are required when converting objects to the target database.

" + }, "DescribeFleetAdvisorCollectors":{ "name":"DescribeFleetAdvisorCollectors", "http":{ @@ -542,6 +687,99 @@ ], "documentation":"

Returns a list of schemas detected by Fleet Advisor Collectors in your account.

" }, + "DescribeInstanceProfiles":{ + "name":"DescribeInstanceProfiles", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInstanceProfilesMessage"}, + "output":{"shape":"DescribeInstanceProfilesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"

Returns a paginated list of instance profiles for your account in the current region.

" + }, + "DescribeMetadataModelAssessments":{ + "name":"DescribeMetadataModelAssessments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelAssessmentsMessage"}, + "output":{"shape":"DescribeMetadataModelAssessmentsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Returns a paginated list of metadata model assessments for your account in the current region.

" + }, + "DescribeMetadataModelConversions":{ + "name":"DescribeMetadataModelConversions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelConversionsMessage"}, + "output":{"shape":"DescribeMetadataModelConversionsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Returns a paginated list of metadata model conversions for a migration project.

" + }, + "DescribeMetadataModelExportsAsScript":{ + "name":"DescribeMetadataModelExportsAsScript", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelExportsAsScriptMessage"}, + "output":{"shape":"DescribeMetadataModelExportsAsScriptResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Returns a paginated list of metadata model exports.

" + }, + "DescribeMetadataModelExportsToTarget":{ + "name":"DescribeMetadataModelExportsToTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelExportsToTargetMessage"}, + "output":{"shape":"DescribeMetadataModelExportsToTargetResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Returns a paginated list of metadata model exports.

" + }, + "DescribeMetadataModelImports":{ + "name":"DescribeMetadataModelImports", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMetadataModelImportsMessage"}, + "output":{"shape":"DescribeMetadataModelImportsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Returns a paginated list of metadata model imports.

" + }, + "DescribeMigrationProjects":{ + "name":"DescribeMigrationProjects", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMigrationProjectsMessage"}, + "output":{"shape":"DescribeMigrationProjectsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"

Returns a paginated list of migration projects for your account in the current region.

" + }, "DescribeOrderableReplicationInstances":{ "name":"DescribeOrderableReplicationInstances", "http":{ @@ -767,6 +1005,19 @@ ], "documentation":"

Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted.

Note that the \"last updated\" column in the DMS console only indicates the time that DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table.

" }, + "ExportMetadataModelAssessment":{ + "name":"ExportMetadataModelAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportMetadataModelAssessmentMessage"}, + "output":{"shape":"ExportMetadataModelAssessmentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Saves a copy of a database migration assessment report to your Amazon S3 bucket. DMS can save your assessment report as a comma-separated value (CSV) or a PDF file.

" + }, "ImportCertificate":{ "name":"ImportCertificate", "http":{ @@ -795,6 +1046,35 @@ ], "documentation":"

Lists all metadata tags attached to an DMS resource, including replication instance, endpoint, subnet group, and migration task. For more information, see Tag data type description.

" }, + "ModifyConversionConfiguration":{ + "name":"ModifyConversionConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyConversionConfigurationMessage"}, + "output":{"shape":"ModifyConversionConfigurationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"

Modifies the specified schema conversion configuration using the provided parameters.

" + }, + "ModifyDataProvider":{ + "name":"ModifyDataProvider", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDataProviderMessage"}, + "output":{"shape":"ModifyDataProviderResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ], + "documentation":"

Modifies the specified data provider using the provided settings.

You must remove the data provider from all migration projects before you can modify it.

" + }, "ModifyEndpoint":{ "name":"ModifyEndpoint", "http":{ @@ -833,6 +1113,41 @@ ], "documentation":"

Modifies an existing DMS event notification subscription.

" }, + "ModifyInstanceProfile":{ + "name":"ModifyInstanceProfile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceProfileMessage"}, + "output":{"shape":"ModifyInstanceProfileResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"

Modifies the specified instance profile using the provided parameters.

All migration projects associated with the instance profile must be deleted or modified before you can modify the instance profile.

" + }, + "ModifyMigrationProject":{ + "name":"ModifyMigrationProject", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyMigrationProjectMessage"}, + "output":{"shape":"ModifyMigrationProjectResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"

Modifies the specified migration project using the provided parameters.

The migration project must be closed before you can modify it.

" + }, "ModifyReplicationConfig":{ "name":"ModifyReplicationConfig", "http":{ @@ -1005,65 +1320,185 @@ ], "documentation":"

Runs large-scale assessment (LSA) analysis on every Fleet Advisor collector in your account.

" }, - "StartRecommendations":{ - "name":"StartRecommendations", + "StartExtensionPackAssociation":{ + "name":"StartExtensionPackAssociation", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"StartRecommendationsRequest"}, + "input":{"shape":"StartExtensionPackAssociationMessage"}, + "output":{"shape":"StartExtensionPackAssociationResponse"}, "errors":[ - {"shape":"InvalidResourceStateFault"}, {"shape":"AccessDeniedFault"}, - {"shape":"ResourceNotFoundFault"} + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} ], - "documentation":"

Starts the analysis of your source database to provide recommendations of target engines.

You can create recommendations for multiple source databases using BatchStartRecommendations.

" + "documentation":"

Applies the extension pack to your target database. An extension pack is an add-on module that emulates functions present in a source database that are required when converting objects to the target database.

" }, - "StartReplication":{ - "name":"StartReplication", + "StartMetadataModelAssessment":{ + "name":"StartMetadataModelAssessment", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"StartReplicationMessage"}, - "output":{"shape":"StartReplicationResponse"}, + "input":{"shape":"StartMetadataModelAssessmentMessage"}, + "output":{"shape":"StartMetadataModelAssessmentResponse"}, "errors":[ - {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"}, {"shape":"InvalidResourceStateFault"}, - {"shape":"AccessDeniedFault"} + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} ], - "documentation":"

For a given DMS Serverless replication configuration, DMS connects to the source endpoint and collects the metadata to analyze the replication workload. Using this metadata, DMS then computes and provisions the required capacity and starts replicating to the target endpoint using the server resources that DMS has provisioned for the DMS Serverless replication.

" + "documentation":"

Creates a database migration assessment report by assessing the migration complexity for your source database. A database migration assessment report summarizes all of the schema conversion tasks. It also details the action items for database objects that can't be converted to the database engine of your target database instance.

" }, - "StartReplicationTask":{ - "name":"StartReplicationTask", + "StartMetadataModelConversion":{ + "name":"StartMetadataModelConversion", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"StartReplicationTaskMessage"}, - "output":{"shape":"StartReplicationTaskResponse"}, + "input":{"shape":"StartMetadataModelConversionMessage"}, + "output":{"shape":"StartMetadataModelConversionResponse"}, "errors":[ - {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"}, {"shape":"InvalidResourceStateFault"}, - {"shape":"AccessDeniedFault"} + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} ], - "documentation":"

Starts the replication task.

For more information about DMS tasks, see Working with Migration Tasks in the Database Migration Service User Guide.

" + "documentation":"

Converts your source database objects to a format compatible with the target database.

" }, - "StartReplicationTaskAssessment":{ - "name":"StartReplicationTaskAssessment", + "StartMetadataModelExportAsScript":{ + "name":"StartMetadataModelExportAsScript", "http":{ "method":"POST", "requestUri":"/" }, - "input":{"shape":"StartReplicationTaskAssessmentMessage"}, - "output":{"shape":"StartReplicationTaskAssessmentResponse"}, + "input":{"shape":"StartMetadataModelExportAsScriptMessage"}, + "output":{"shape":"StartMetadataModelExportAsScriptResponse"}, "errors":[ + {"shape":"AccessDeniedFault"}, {"shape":"InvalidResourceStateFault"}, - {"shape":"ResourceNotFoundFault"} - ], - "documentation":"

Starts the replication task assessment for unsupported data types in the source database.

You can only use this operation for a task if the following conditions are true:

  • The task must be in the stopped state.

  • The task must have successful connections to the source and target.

If either of these conditions are not met, an InvalidResourceStateFault error will result.

For information about DMS task assessments, see Creating a task assessment report in the Database Migration Service User Guide.

" - }, - "StartReplicationTaskAssessmentRun":{ + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"

Saves your converted code to a file as a SQL script, and stores this file on your Amazon S3 bucket.

" + }, + "StartMetadataModelExportToTarget":{ + "name":"StartMetadataModelExportToTarget", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMetadataModelExportToTargetMessage"}, + "output":{"shape":"StartMetadataModelExportToTargetResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"

Applies converted database objects to your target database.

" + }, + "StartMetadataModelImport":{ + "name":"StartMetadataModelImport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartMetadataModelImportMessage"}, + "output":{"shape":"StartMetadataModelImportResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceAlreadyExistsFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"ResourceQuotaExceededFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"S3AccessDeniedFault"} + ], + "documentation":"

Loads the metadata for all the dependent database objects of the parent object.

This operation uses your project's Amazon S3 bucket as a metadata cache to improve performance.

" + }, + "StartRecommendations":{ + "name":"StartRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartRecommendationsRequest"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Starts the analysis of your source database to provide recommendations of target engines.

You can create recommendations for multiple source databases using BatchStartRecommendations.

" + }, + "StartReplication":{ + "name":"StartReplication", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplicationMessage"}, + "output":{"shape":"StartReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"

For a given DMS Serverless replication configuration, DMS connects to the source endpoint and collects the metadata to analyze the replication workload. Using this metadata, DMS then computes and provisions the required capacity and starts replicating to the target endpoint using the server resources that DMS has provisioned for the DMS Serverless replication.

" + }, + "StartReplicationTask":{ + "name":"StartReplicationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplicationTaskMessage"}, + "output":{"shape":"StartReplicationTaskResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"} + ], + "documentation":"

Starts the replication task.

For more information about DMS tasks, see Working with Migration Tasks in the Database Migration Service User Guide.

" + }, + "StartReplicationTaskAssessment":{ + "name":"StartReplicationTaskAssessment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplicationTaskAssessmentMessage"}, + "output":{"shape":"StartReplicationTaskAssessmentResponse"}, + "errors":[ + {"shape":"InvalidResourceStateFault"}, + {"shape":"ResourceNotFoundFault"} + ], + "documentation":"

Starts the replication task assessment for unsupported data types in the source database.

You can only use this operation for a task if the following conditions are true:

  • The task must be in the stopped state.

  • The task must have successful connections to the source and target.

If either of these conditions are not met, an InvalidResourceStateFault error will result.

For information about DMS task assessments, see Creating a task assessment report in the Database Migration Service User Guide.

" + }, + "StartReplicationTaskAssessmentRun":{ "name":"StartReplicationTaskAssessmentRun", "http":{ "method":"POST", @@ -1243,6 +1678,18 @@ "type":"list", "member":{"shape":"String"} }, + "AssessmentReportType":{ + "type":"string", + "enum":[ + "pdf", + "csv" + ] + }, + "AssessmentReportTypesList":{ + "type":"list", + "member":{"shape":"AssessmentReportType"}, + "min":1 + }, "AuthMechanismValue":{ "type":"string", "enum":[ @@ -1272,6 +1719,10 @@ "type":"list", "member":{"shape":"String"} }, + "AvailableUpgradesList":{ + "type":"list", + "member":{"shape":"String"} + }, "BatchStartRecommendationsErrorEntry":{ "type":"structure", "members":{ @@ -1601,6 +2052,44 @@ "type":"list", "member":{"shape":"Connection"} }, + "CreateDataProviderMessage":{ + "type":"structure", + "required":[ + "Engine", + "Settings" + ], + "members":{ + "DataProviderName":{ + "shape":"String", + "documentation":"

A user-friendly name for the data provider.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A user-friendly description of the data provider.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The type of database engine for the data provider. Valid values include \"aurora\", \"aurora_postgresql\", \"mysql\", \"oracle\", \"postgres\", and \"sqlserver\". A value of \"aurora\" represents Amazon Aurora MySQL-Compatible Edition.

" + }, + "Settings":{ + "shape":"DataProviderSettings", + "documentation":"

The settings in JSON format for a data provider.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

One or more tags to be assigned to the data provider.

" + } + } + }, + "CreateDataProviderResponse":{ + "type":"structure", + "members":{ + "DataProvider":{ + "shape":"DataProvider", + "documentation":"

The data provider that was created.

" + } + } + }, "CreateEndpointMessage":{ "type":"structure", "required":[ @@ -1855,6 +2344,107 @@ } } }, + "CreateInstanceProfileMessage":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "documentation":"

The Availability Zone where the instance profile will be created. The default value is a random, system-chosen Availability Zone in the Amazon Web Services Region where your data provider is created, for example, us-east-1d.

" + }, + "KmsKeyArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the connection parameters for the instance profile.

If you don't specify a value for the KmsKeyArn parameter, then DMS uses your default encryption key.

KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region.

" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"

Specifies the accessibility options for the instance profile. A value of true represents an instance profile with a public IP address. A value of false represents an instance profile with a private IP address. The default value is true.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

One or more tags to be assigned to the instance profile.

" + }, + "NetworkType":{ + "shape":"String", + "documentation":"

Specifies the network type for the instance profile. A value of IPV4 represents an instance profile with IPv4 network type and only supports IPv4 addressing. A value of IPV6 represents an instance profile with IPv6 network type and only supports IPv6 addressing. A value of DUAL represents an instance profile with dual network type that supports IPv4 and IPv6 addressing.

" + }, + "InstanceProfileName":{ + "shape":"String", + "documentation":"

A user-friendly name for the instance profile.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A user-friendly description of the instance profile.

" + }, + "SubnetGroupIdentifier":{ + "shape":"String", + "documentation":"

A subnet group to associate with the instance profile.

" + }, + "VpcSecurityGroups":{ + "shape":"StringList", + "documentation":"

Specifies the VPC security group names to be used with the instance profile. The VPC security group must work with the VPC containing the instance profile.

" + } + } + }, + "CreateInstanceProfileResponse":{ + "type":"structure", + "members":{ + "InstanceProfile":{ + "shape":"InstanceProfile", + "documentation":"

The instance profile that was created.

" + } + } + }, + "CreateMigrationProjectMessage":{ + "type":"structure", + "required":[ + "SourceDataProviderDescriptors", + "TargetDataProviderDescriptors", + "InstanceProfileIdentifier" + ], + "members":{ + "MigrationProjectName":{ + "shape":"String", + "documentation":"

A user-friendly name for the migration project.

" + }, + "SourceDataProviderDescriptors":{ + "shape":"DataProviderDescriptorDefinitionList", + "documentation":"

Information about the source data provider, including the name, ARN, and Secrets Manager parameters.

" + }, + "TargetDataProviderDescriptors":{ + "shape":"DataProviderDescriptorDefinitionList", + "documentation":"

Information about the target data provider, including the name, ARN, and Amazon Web Services Secrets Manager parameters.

" + }, + "InstanceProfileIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the associated instance profile. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.

" + }, + "TransformationRules":{ + "shape":"String", + "documentation":"

The settings in JSON format for migration rules. Migration rules make it possible for you to change the object names according to the rules that you specify. For example, you can change an object name to lowercase or uppercase, add or remove a prefix or suffix, or rename objects.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A user-friendly description of the migration project.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

One or more tags to be assigned to the migration project.

" + }, + "SchemaConversionApplicationAttributes":{ + "shape":"SCApplicationAttributes", + "documentation":"

The schema conversion application attributes, including the Amazon S3 bucket name and Amazon S3 role ARN.

" + } + } + }, + "CreateMigrationProjectResponse":{ + "type":"structure", + "members":{ + "MigrationProject":{ + "shape":"MigrationProject", + "documentation":"

The migration project that was created.

" + } + } + }, "CreateReplicationConfigMessage":{ "type":"structure", "required":[ @@ -2123,6 +2713,100 @@ "parquet" ] }, + "DataProvider":{ + "type":"structure", + "members":{ + "DataProviderName":{ + "shape":"String", + "documentation":"

The name of the data provider.

" + }, + "DataProviderArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the data provider.

" + }, + "DataProviderCreationTime":{ + "shape":"Iso8601DateTime", + "documentation":"

The time the data provider was created.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description of the data provider. Descriptions can have up to 31 characters. A description can contain only ASCII letters, digits, and hyphens ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The type of database engine for the data provider. Valid values include \"aurora\", \"aurora_postgresql\", \"mysql\", \"oracle\", \"postgres\", and \"sqlserver\". A value of \"aurora\" represents Amazon Aurora MySQL-Compatible Edition.

" + }, + "Settings":{ + "shape":"DataProviderSettings", + "documentation":"

The settings in JSON format for a data provider.

" + } + }, + "documentation":"

Provides information that defines a data provider.

" + }, + "DataProviderDescriptor":{ + "type":"structure", + "members":{ + "SecretsManagerSecretId":{ + "shape":"String", + "documentation":"

The identifier of the Amazon Web Services Secrets Manager Secret used to store access credentials for the data provider.

" + }, + "SecretsManagerAccessRoleArn":{ + "shape":"String", + "documentation":"

The ARN of the role used to access Amazon Web Services Secrets Manager.

" + }, + "DataProviderName":{ + "shape":"String", + "documentation":"

The user-friendly name of the data provider.

" + }, + "DataProviderArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the data provider.

" + } + }, + "documentation":"

Information about a data provider.

" + }, + "DataProviderDescriptorDefinition":{ + "type":"structure", + "required":["DataProviderIdentifier"], + "members":{ + "DataProviderIdentifier":{ + "shape":"String", + "documentation":"

The name or Amazon Resource Name (ARN) of the data provider.

" + }, + "SecretsManagerSecretId":{ + "shape":"String", + "documentation":"

The identifier of the Amazon Web Services Secrets Manager Secret used to store access credentials for the data provider.

" + }, + "SecretsManagerAccessRoleArn":{ + "shape":"String", + "documentation":"

The ARN of the role used to access Amazon Web Services Secrets Manager.

" + } + }, + "documentation":"

Information about a data provider.

" + }, + "DataProviderDescriptorDefinitionList":{ + "type":"list", + "member":{"shape":"DataProviderDescriptorDefinition"} + }, + "DataProviderDescriptorList":{ + "type":"list", + "member":{"shape":"DataProviderDescriptor"} + }, + "DataProviderList":{ + "type":"list", + "member":{"shape":"DataProvider"} + }, + "DataProviderSettings":{ + "type":"structure", + "members":{ + "PostgreSqlSettings":{"shape":"PostgreSqlDataProviderSettings"}, + "MySqlSettings":{"shape":"MySqlDataProviderSettings"}, + "OracleSettings":{"shape":"OracleDataProviderSettings"}, + "MicrosoftSqlServerSettings":{"shape":"MicrosoftSqlServerDataProviderSettings"} + }, + "documentation":"

Provides information that defines a data provider.

", + "union":true + }, "DatabaseInstanceSoftwareDetailsResponse":{ "type":"structure", "members":{ @@ -2243,6 +2927,16 @@ "DDMMYYYY" ] }, + "DefaultErrorDetails":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "documentation":"

The error message.

" + } + }, + "documentation":"

Provides error information about a schema conversion operation.

" + }, "DeleteCertificateMessage":{ "type":"structure", "required":["CertificateArn"], @@ -2300,16 +2994,35 @@ }, "documentation":"

" }, - "DeleteEndpointMessage":{ + "DeleteDataProviderMessage":{ "type":"structure", - "required":["EndpointArn"], + "required":["DataProviderIdentifier"], "members":{ - "EndpointArn":{ + "DataProviderIdentifier":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

" + "documentation":"

The identifier of the data provider to delete.

" } - }, - "documentation":"

" + } + }, + "DeleteDataProviderResponse":{ + "type":"structure", + "members":{ + "DataProvider":{ + "shape":"DataProvider", + "documentation":"

The data provider that was deleted.

" + } + } + }, + "DeleteEndpointMessage":{ + "type":"structure", + "required":["EndpointArn"], + "members":{ + "EndpointArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

" + } + }, + "documentation":"

" }, "DeleteEndpointResponse":{ "type":"structure", @@ -2361,6 +3074,44 @@ } } }, + "DeleteInstanceProfileMessage":{ + "type":"structure", + "required":["InstanceProfileIdentifier"], + "members":{ + "InstanceProfileIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the instance profile to delete.

" + } + } + }, + "DeleteInstanceProfileResponse":{ + "type":"structure", + "members":{ + "InstanceProfile":{ + "shape":"InstanceProfile", + "documentation":"

The instance profile that was deleted.

" + } + } + }, + "DeleteMigrationProjectMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The name or Amazon Resource Name (ARN) of the migration project to delete.

" + } + } + }, + "DeleteMigrationProjectResponse":{ + "type":"structure", + "members":{ + "MigrationProject":{ + "shape":"MigrationProject", + "documentation":"

The migration project that was deleted.

" + } + } + }, "DeleteReplicationConfigMessage":{ "type":"structure", "required":["ReplicationConfigArn"], @@ -2592,6 +3343,59 @@ }, "documentation":"

" }, + "DescribeConversionConfigurationMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The name or Amazon Resource Name (ARN) for the schema conversion project to describe.

" + } + } + }, + "DescribeConversionConfigurationResponse":{ + "type":"structure", + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The name or Amazon Resource Name (ARN) for the schema conversion project.

" + }, + "ConversionConfiguration":{ + "shape":"String", + "documentation":"

The configuration parameters for the schema conversion project.

" + } + } + }, + "DescribeDataProvidersMessage":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters applied to the data providers described in the form of key-value pairs.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + } + } + }, + "DescribeDataProvidersResponse":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "DataProviders":{ + "shape":"DataProviderList", + "documentation":"

A description of data providers.

" + } + } + }, "DescribeEndpointSettingsMessage":{ "type":"structure", "required":["EngineName"], @@ -2687,6 +3491,32 @@ }, "documentation":"

" }, + "DescribeEngineVersionsMessage":{ + "type":"structure", + "members":{ + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + } + }, + "DescribeEngineVersionsResponse":{ + "type":"structure", + "members":{ + "EngineVersions":{ + "shape":"EngineVersionList", + "documentation":"

Returned EngineVersion objects that describe the replication instance engine versions used in the project.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + } + }, "DescribeEventCategoriesMessage":{ "type":"structure", "members":{ @@ -2803,6 +3633,41 @@ }, "documentation":"

" }, + "DescribeExtensionPackAssociationsMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The name or Amazon Resource Name (ARN) for the migration project.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters applied to the extension pack associations described in the form of key-value pairs.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.

" + } + } + }, + "DescribeExtensionPackAssociationsResponse":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "Requests":{ + "shape":"SchemaConversionRequestList", + "documentation":"

A paginated list of extension pack associations for the specified migration project.

" + } + } + }, "DescribeFleetAdvisorCollectorsRequest":{ "type":"structure", "members":{ @@ -2829,123 +3694,358 @@ }, "NextToken":{ "shape":"String", - "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "DescribeFleetAdvisorDatabasesRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

If you specify any of the following filters, the output includes information for only those databases that meet the filter criteria:

  • database-id – The ID of the database.

  • database-name – The name of the database.

  • database-engine – The name of the database engine.

  • server-ip-address – The IP address of the database server.

  • database-ip-address – The IP address of the database.

  • collector-name – The name of the associated Fleet Advisor collector.

An example is: describe-fleet-advisor-databases --filter Name=\"database-id\",Values=\"45\"

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

Sets the maximum number of records returned in the response.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "DescribeFleetAdvisorDatabasesResponse":{ + "type":"structure", + "members":{ + "Databases":{ + "shape":"DatabaseList", + "documentation":"

Provides descriptions of the Fleet Advisor collector databases, including the database's collector, ID, and name.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "DescribeFleetAdvisorLsaAnalysisRequest":{ + "type":"structure", + "members":{ + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

Sets the maximum number of records returned in the response.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "DescribeFleetAdvisorLsaAnalysisResponse":{ + "type":"structure", + "members":{ + "Analysis":{ + "shape":"FleetAdvisorLsaAnalysisResponseList", + "documentation":"

A list of FleetAdvisorLsaAnalysisResponse objects.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "DescribeFleetAdvisorSchemaObjectSummaryRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

If you specify any of the following filters, the output includes information for only those schema objects that meet the filter criteria:

  • schema-id – The ID of the schema, for example d4610ac5-e323-4ad9-bc50-eaf7249dfe9d.

Example: describe-fleet-advisor-schema-object-summary --filter Name=\"schema-id\",Values=\"50\"

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

Sets the maximum number of records returned in the response.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "DescribeFleetAdvisorSchemaObjectSummaryResponse":{ + "type":"structure", + "members":{ + "FleetAdvisorSchemaObjects":{ + "shape":"FleetAdvisorSchemaObjectList", + "documentation":"

A collection of FleetAdvisorSchemaObjectResponse objects.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "DescribeFleetAdvisorSchemasRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

If you specify any of the following filters, the output includes information for only those schemas that meet the filter criteria:

  • complexity – The schema's complexity, for example Simple.

  • database-id – The ID of the schema's database.

  • database-ip-address – The IP address of the schema's database.

  • database-name – The name of the schema's database.

  • database-engine – The name of the schema database's engine.

  • original-schema-name – The name of the schema's database's main schema.

  • schema-id – The ID of the schema, for example 15.

  • schema-name – The name of the schema.

  • server-ip-address – The IP address of the schema database's server.

An example is: describe-fleet-advisor-schemas --filter Name=\"schema-id\",Values=\"50\"

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

Sets the maximum number of records returned in the response.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "DescribeFleetAdvisorSchemasResponse":{ + "type":"structure", + "members":{ + "FleetAdvisorSchemas":{ + "shape":"FleetAdvisorSchemaList", + "documentation":"

A collection of SchemaResponse objects.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + } + } + }, + "DescribeInstanceProfilesMessage":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters applied to the instance profiles described in the form of key-value pairs.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + } + } + }, + "DescribeInstanceProfilesResponse":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "InstanceProfiles":{ + "shape":"InstanceProfileList", + "documentation":"

A description of instance profiles.

" + } + } + }, + "DescribeMetadataModelAssessmentsMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The name or Amazon Resource Name (ARN) of the migration project.

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters applied to the metadata model assessments described in the form of key-value pairs.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.

" + } + } + }, + "DescribeMetadataModelAssessmentsResponse":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "Requests":{ + "shape":"SchemaConversionRequestList", + "documentation":"

A paginated list of metadata model assessments for the specified migration project.

" + } + } + }, + "DescribeMetadataModelConversionsMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters applied to the metadata model conversions described in the form of key-value pairs.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.

" + } + } + }, + "DescribeMetadataModelConversionsResponse":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "Requests":{ + "shape":"SchemaConversionRequestList", + "documentation":"

A paginated list of metadata model conversions.

" } } }, - "DescribeFleetAdvisorDatabasesRequest":{ + "DescribeMetadataModelExportsAsScriptMessage":{ "type":"structure", + "required":["MigrationProjectIdentifier"], "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, "Filters":{ "shape":"FilterList", - "documentation":"

If you specify any of the following filters, the output includes information for only those databases that meet the filter criteria:

  • database-id – The ID of the database.

  • database-name – The name of the database.

  • database-engine – The name of the database engine.

  • server-ip-address – The IP address of the database server.

  • database-ip-address – The IP address of the database.

  • collector-name – The name of the associated Fleet Advisor collector.

An example is: describe-fleet-advisor-databases --filter Name=\"database-id\",Values=\"45\"

" + "documentation":"

Filters applied to the metadata model exports described in the form of key-value pairs.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" }, "MaxRecords":{ "shape":"IntegerOptional", - "documentation":"

Sets the maximum number of records returned in the response.

" - }, - "NextToken":{ - "shape":"String", - "documentation":"

If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.

" } } }, - "DescribeFleetAdvisorDatabasesResponse":{ + "DescribeMetadataModelExportsAsScriptResponse":{ "type":"structure", "members":{ - "Databases":{ - "shape":"DatabaseList", - "documentation":"

Provides descriptions of the Fleet Advisor collector databases, including the database's collector, ID, and name.

" - }, - "NextToken":{ + "Marker":{ "shape":"String", - "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "Requests":{ + "shape":"SchemaConversionRequestList", + "documentation":"

A paginated list of metadata model exports.

" } } }, - "DescribeFleetAdvisorLsaAnalysisRequest":{ + "DescribeMetadataModelExportsToTargetMessage":{ "type":"structure", + "required":["MigrationProjectIdentifier"], "members":{ - "MaxRecords":{ - "shape":"IntegerOptional", - "documentation":"

Sets the maximum number of records returned in the response.

" + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" }, - "NextToken":{ + "Filters":{ + "shape":"FilterList", + "documentation":"

Filters applied to the metadata model exports described in the form of key-value pairs.

" + }, + "Marker":{ "shape":"String", - "documentation":"

If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.

" } } }, - "DescribeFleetAdvisorLsaAnalysisResponse":{ + "DescribeMetadataModelExportsToTargetResponse":{ "type":"structure", "members":{ - "Analysis":{ - "shape":"FleetAdvisorLsaAnalysisResponseList", - "documentation":"

A list of FleetAdvisorLsaAnalysisResponse objects.

" - }, - "NextToken":{ + "Marker":{ "shape":"String", - "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "Requests":{ + "shape":"SchemaConversionRequestList", + "documentation":"

A paginated list of metadata model exports.

" } } }, - "DescribeFleetAdvisorSchemaObjectSummaryRequest":{ + "DescribeMetadataModelImportsMessage":{ "type":"structure", + "required":["MigrationProjectIdentifier"], "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, "Filters":{ "shape":"FilterList", - "documentation":"

If you specify any of the following filters, the output includes information for only those schema objects that meet the filter criteria:

  • schema-id – The ID of the schema, for example d4610ac5-e323-4ad9-bc50-eaf7249dfe9d.

Example: describe-fleet-advisor-schema-object-summary --filter Name=\"schema-id\",Values=\"50\"

" + "documentation":"

Filters applied to the metadata model imports described in the form of key-value pairs.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" }, "MaxRecords":{ "shape":"IntegerOptional", - "documentation":"

Sets the maximum number of records returned in the response.

" - }, - "NextToken":{ - "shape":"String", - "documentation":"

If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.

" } } }, - "DescribeFleetAdvisorSchemaObjectSummaryResponse":{ + "DescribeMetadataModelImportsResponse":{ "type":"structure", "members":{ - "FleetAdvisorSchemaObjects":{ - "shape":"FleetAdvisorSchemaObjectList", - "documentation":"

A collection of FleetAdvisorSchemaObjectResponse objects.

" - }, - "NextToken":{ + "Marker":{ "shape":"String", - "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "Requests":{ + "shape":"SchemaConversionRequestList", + "documentation":"

A paginated list of metadata model imports.

" } } }, - "DescribeFleetAdvisorSchemasRequest":{ + "DescribeMigrationProjectsMessage":{ "type":"structure", "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

If you specify any of the following filters, the output includes information for only those schemas that meet the filter criteria:

  • complexity – The schema's complexity, for example Simple.

  • database-id – The ID of the schema's database.

  • database-ip-address – The IP address of the schema's database.

  • database-name – The name of the schema's database.

  • database-engine – The name of the schema database's engine.

  • original-schema-name – The name of the schema's database's main schema.

  • schema-id – The ID of the schema, for example 15.

  • schema-name – The name of the schema.

  • server-ip-address – The IP address of the schema database's server.

An example is: describe-fleet-advisor-schemas --filter Name=\"schema-id\",Values=\"50\"

" + "documentation":"

Filters applied to the migration projects described in the form of key-value pairs.

" }, "MaxRecords":{ "shape":"IntegerOptional", - "documentation":"

Sets the maximum number of records returned in the response.

" + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, DMS includes a pagination token in the response so that you can retrieve the remaining results.

" }, - "NextToken":{ + "Marker":{ "shape":"String", - "documentation":"

If NextToken is returned by a previous response, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" } } }, - "DescribeFleetAdvisorSchemasResponse":{ + "DescribeMigrationProjectsResponse":{ "type":"structure", "members":{ - "FleetAdvisorSchemas":{ - "shape":"FleetAdvisorSchemaList", - "documentation":"

A collection of SchemaResponse objects.

" - }, - "NextToken":{ + "Marker":{ "shape":"String", - "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged.

" + "documentation":"

Specifies the unique pagination token that makes it possible to display the next page of results. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

If Marker is returned by a previous response, there are more results available. The value of Marker is a unique pagination token for each page. To retrieve the next page, make the call again using the returned token and keeping all other arguments unchanged.

" + }, + "MigrationProjects":{ + "shape":"MigrationProjectList", + "documentation":"

A description of migration projects.

" } } }, @@ -3856,6 +4956,59 @@ "type":"list", "member":{"shape":"EndpointSetting"} }, + "EngineVersion":{ + "type":"structure", + "members":{ + "Version":{ + "shape":"String", + "documentation":"

The version number of the replication instance.

" + }, + "Lifecycle":{ + "shape":"String", + "documentation":"

The lifecycle status of the replication instance version. Valid values are DEPRECATED, DEFAULT_VERSION, and ACTIVE.

" + }, + "ReleaseStatus":{ + "shape":"ReleaseStatusValues", + "documentation":"

The release status of the replication instance version.

" + }, + "LaunchDate":{ + "shape":"TStamp", + "documentation":"

The date when the replication instance version became publicly available.

" + }, + "AutoUpgradeDate":{ + "shape":"TStamp", + "documentation":"

The date when the replication instance will be automatically upgraded. This setting only applies if the auto-minor-version setting is enabled.

" + }, + "DeprecationDate":{ + "shape":"TStamp", + "documentation":"

The date when the replication instance version will be deprecated and can no longer be requested.

" + }, + "ForceUpgradeDate":{ + "shape":"TStamp", + "documentation":"

The date when the replication instance will have a version upgrade forced.

" + }, + "AvailableUpgrades":{ + "shape":"AvailableUpgradesList", + "documentation":"

The list of valid replication instance versions that you can upgrade to.

" + } + }, + "documentation":"

Provides information about a replication instance version.

" + }, + "EngineVersionList":{ + "type":"list", + "member":{"shape":"EngineVersion"} + }, + "ErrorDetails":{ + "type":"structure", + "members":{ + "defaultErrorDetails":{ + "shape":"DefaultErrorDetails", + "documentation":"

Error information about a project.

" + } + }, + "documentation":"

Provides error information about a project.

", + "union":true + }, "Event":{ "type":"structure", "members":{ @@ -3959,6 +5112,72 @@ "type":"list", "member":{"shape":"String"} }, + "ExportMetadataModelAssessmentMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"

A value that specifies the database objects to assess.

" + }, + "FileName":{ + "shape":"String", + "documentation":"

The name of the assessment file to create in your Amazon S3 bucket.

" + }, + "AssessmentReportTypes":{ + "shape":"AssessmentReportTypesList", + "documentation":"

The file format of the assessment file.

" + } + } + }, + "ExportMetadataModelAssessmentResponse":{ + "type":"structure", + "members":{ + "PdfReport":{ + "shape":"ExportMetadataModelAssessmentResultEntry", + "documentation":"

The Amazon S3 details for an assessment exported in PDF format.

" + }, + "CsvReport":{ + "shape":"ExportMetadataModelAssessmentResultEntry", + "documentation":"

The Amazon S3 details for an assessment exported in CSV format.

" + } + } + }, + "ExportMetadataModelAssessmentResultEntry":{ + "type":"structure", + "members":{ + "S3ObjectKey":{ + "shape":"String", + "documentation":"

The object key for the object containing the exported metadata model assessment.

" + }, + "ObjectURL":{ + "shape":"String", + "documentation":"

The URL for the object containing the exported metadata model assessment.

" + } + }, + "documentation":"

Provides information about an exported metadata model assessment.

" + }, + "ExportSqlDetails":{ + "type":"structure", + "members":{ + "S3ObjectKey":{ + "shape":"String", + "documentation":"

The Amazon S3 object key for the object containing the exported metadata model assessment.

" + }, + "ObjectURL":{ + "shape":"String", + "documentation":"

The URL for the object containing the exported metadata model assessment.

" + } + }, + "documentation":"

Provides information about a metadata model assessment exported to SQL.

" + }, "Filter":{ "type":"structure", "required":[ @@ -4184,6 +5403,56 @@ "type":"list", "member":{"shape":"String"} }, + "InstanceProfile":{ + "type":"structure", + "members":{ + "InstanceProfileArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) string that uniquely identifies the instance profile.

" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

The Availability Zone where the instance profile runs.

" + }, + "KmsKeyArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the connection parameters for the instance profile.

If you don't specify a value for the KmsKeyArn parameter, then DMS uses your default encryption key.

KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region.

" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"

Specifies the accessibility options for the instance profile. A value of true represents an instance profile with a public IP address. A value of false represents an instance profile with a private IP address. The default value is true.

" + }, + "NetworkType":{ + "shape":"String", + "documentation":"

Specifies the network type for the instance profile. A value of IPV4 represents an instance profile with IPv4 network type and only supports IPv4 addressing. A value of IPV6 represents an instance profile with IPv6 network type and only supports IPv6 addressing. A value of DUAL represents an instance profile with dual network type that supports IPv4 and IPv6 addressing.

" + }, + "InstanceProfileName":{ + "shape":"String", + "documentation":"

The user-friendly name for the instance profile.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A description of the instance profile. Descriptions can have up to 31 characters. A description can contain only ASCII letters, digits, and hyphens ('-'). Also, it can't end with a hyphen or contain two consecutive hyphens, and can only begin with a letter.

" + }, + "InstanceProfileCreationTime":{ + "shape":"Iso8601DateTime", + "documentation":"

The time the instance profile was created.

" + }, + "SubnetGroupIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the subnet group that is associated with the instance profile.

" + }, + "VpcSecurityGroups":{ + "shape":"StringList", + "documentation":"

The VPC security groups that are used with the instance profile. The VPC security group must work with the VPC containing the instance profile.

" + } + }, + "documentation":"

Provides information that defines an instance profile.

" + }, + "InstanceProfileList":{ + "type":"list", + "member":{"shape":"InstanceProfile"} + }, "InsufficientResourceCapacityFault":{ "type":"structure", "members":{ @@ -4253,6 +5522,10 @@ }, "documentation":"

Describes a Fleet Advisor collector inventory.

" }, + "Iso8601DateTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "KMSAccessDeniedFault":{ "type":"structure", "members":{ @@ -4620,6 +5893,82 @@ }, "documentation":"

Provides information that defines a Microsoft SQL Server endpoint.

" }, + "MicrosoftSqlServerDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{ + "shape":"String", + "documentation":"

The name of the Microsoft SQL Server server.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port value for the Microsoft SQL Server data provider.

" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"

The database name on the Microsoft SQL Server data provider.

" + }, + "SslMode":{ + "shape":"DmsSslModeValue", + "documentation":"

The SSL mode used to connect to the Microsoft SQL Server data provider. The default value is none.

" + }, + "CertificateArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the certificate used for SSL connection.

" + } + }, + "documentation":"

Provides information that defines a Microsoft SQL Server data provider.

" + }, + "MigrationProject":{ + "type":"structure", + "members":{ + "MigrationProjectName":{ + "shape":"String", + "documentation":"

The name of the migration project.

" + }, + "MigrationProjectArn":{ + "shape":"String", + "documentation":"

The ARN string that uniquely identifies the migration project.

" + }, + "MigrationProjectCreationTime":{ + "shape":"Iso8601DateTime", + "documentation":"

The time when the migration project was created.

" + }, + "SourceDataProviderDescriptors":{ + "shape":"DataProviderDescriptorList", + "documentation":"

Information about the source data provider, including the name or ARN, and Secrets Manager parameters.

" + }, + "TargetDataProviderDescriptors":{ + "shape":"DataProviderDescriptorList", + "documentation":"

Information about the target data provider, including the name or ARN, and Secrets Manager parameters.

" + }, + "InstanceProfileArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the instance profile for your migration project.

" + }, + "InstanceProfileName":{ + "shape":"String", + "documentation":"

The name of the associated instance profile.

" + }, + "TransformationRules":{ + "shape":"String", + "documentation":"

The settings in JSON format for migration rules. Migration rules make it possible for you to change the object names according to the rules that you specify. For example, you can change an object name to lowercase or uppercase, add or remove a prefix or suffix, or rename objects.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A user-friendly description of the migration project.

" + }, + "SchemaConversionApplicationAttributes":{ + "shape":"SCApplicationAttributes", + "documentation":"

The schema conversion application attributes, including the Amazon S3 bucket name and Amazon S3 role ARN.

" + } + }, + "documentation":"

Provides information that defines a migration project.

" + }, + "MigrationProjectList":{ + "type":"list", + "member":{"shape":"MigrationProject"} + }, "MigrationTypeValue":{ "type":"string", "enum":[ @@ -4628,6 +5977,71 @@ "full-load-and-cdc" ] }, + "ModifyConversionConfigurationMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "ConversionConfiguration" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, + "ConversionConfiguration":{ + "shape":"String", + "documentation":"

The new conversion configuration.

" + } + } + }, + "ModifyConversionConfigurationResponse":{ + "type":"structure", + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The name or Amazon Resource Name (ARN) of the modified configuration.

" + } + } + }, + "ModifyDataProviderMessage":{ + "type":"structure", + "required":["DataProviderIdentifier"], + "members":{ + "DataProviderIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the data provider. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.

" + }, + "DataProviderName":{ + "shape":"String", + "documentation":"

The name of the data provider.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A user-friendly description of the data provider.

" + }, + "Engine":{ + "shape":"String", + "documentation":"

The type of database engine for the data provider. Valid values include \"aurora\", \"aurora_postgresql\", \"mysql\", \"oracle\", \"postgres\", and \"sqlserver\". A value of \"aurora\" represents Amazon Aurora MySQL-Compatible Edition.

" + }, + "ExactSettings":{ + "shape":"BooleanOptional", + "documentation":"

If this attribute is Y, the current call to ModifyDataProvider replaces all existing data provider settings with the exact settings that you specify in this call. If this attribute is N, the current call to ModifyDataProvider does two things:

  • It replaces any data provider settings that already exist with new values, for settings with the same names.

  • It creates new data provider settings that you specify in the call, for settings with different names.

" + }, + "Settings":{ + "shape":"DataProviderSettings", + "documentation":"

The settings in JSON format for a data provider.

" + } + } + }, + "ModifyDataProviderResponse":{ + "type":"structure", + "members":{ + "DataProvider":{ + "shape":"DataProvider", + "documentation":"

The data provider that was modified.

" + } + } + }, "ModifyEndpointMessage":{ "type":"structure", "required":["EndpointArn"], @@ -4815,6 +6229,104 @@ }, "documentation":"

" }, + "ModifyInstanceProfileMessage":{ + "type":"structure", + "required":["InstanceProfileIdentifier"], + "members":{ + "InstanceProfileIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the instance profile. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.

" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

The Availability Zone where the instance profile runs.

" + }, + "KmsKeyArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the connection parameters for the instance profile.

If you don't specify a value for the KmsKeyArn parameter, then DMS uses your default encryption key.

KMS creates the default encryption key for your Amazon Web Services account. Your Amazon Web Services account has a different default encryption key for each Amazon Web Services Region.

" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"

Specifies the accessibility options for the instance profile. A value of true represents an instance profile with a public IP address. A value of false represents an instance profile with a private IP address. The default value is true.

" + }, + "NetworkType":{ + "shape":"String", + "documentation":"

Specifies the network type for the instance profile. A value of IPV4 represents an instance profile with IPv4 network type and only supports IPv4 addressing. A value of IPV6 represents an instance profile with IPv6 network type and only supports IPv6 addressing. A value of DUAL represents an instance profile with dual network type that supports IPv4 and IPv6 addressing.

" + }, + "InstanceProfileName":{ + "shape":"String", + "documentation":"

A user-friendly name for the instance profile.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A user-friendly description for the instance profile.

" + }, + "SubnetGroupIdentifier":{ + "shape":"String", + "documentation":"

A subnet group to associate with the instance profile.

" + }, + "VpcSecurityGroups":{ + "shape":"StringList", + "documentation":"

Specifies the VPC security groups to be used with the instance profile. The VPC security group must work with the VPC containing the instance profile.

" + } + } + }, + "ModifyInstanceProfileResponse":{ + "type":"structure", + "members":{ + "InstanceProfile":{ + "shape":"InstanceProfile", + "documentation":"

The instance profile that was modified.

" + } + } + }, + "ModifyMigrationProjectMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The identifier of the migration project. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens.

" + }, + "MigrationProjectName":{ + "shape":"String", + "documentation":"

A user-friendly name for the migration project.

" + }, + "SourceDataProviderDescriptors":{ + "shape":"DataProviderDescriptorDefinitionList", + "documentation":"

Information about the source data provider, including the name, ARN, and Amazon Web Services Secrets Manager parameters.

" + }, + "TargetDataProviderDescriptors":{ + "shape":"DataProviderDescriptorDefinitionList", + "documentation":"

Information about the target data provider, including the name, ARN, and Amazon Web Services Secrets Manager parameters.

" + }, + "InstanceProfileIdentifier":{ + "shape":"String", + "documentation":"

The name or Amazon Resource Name (ARN) for the instance profile.

" + }, + "TransformationRules":{ + "shape":"String", + "documentation":"

The settings in JSON format for migration rules. Migration rules make it possible for you to change the object names according to the rules that you specify. For example, you can change an object name to lowercase or uppercase, add or remove a prefix or suffix, or rename objects.

" + }, + "Description":{ + "shape":"String", + "documentation":"

A user-friendly description of the migration project.

" + }, + "SchemaConversionApplicationAttributes":{ + "shape":"SCApplicationAttributes", + "documentation":"

The schema conversion application attributes, including the Amazon S3 bucket name and Amazon S3 role ARN.

" + } + } + }, + "ModifyMigrationProjectResponse":{ + "type":"structure", + "members":{ + "MigrationProject":{ + "shape":"MigrationProject", + "documentation":"

The migration project that was modified.

" + } + } + }, "ModifyReplicationConfigMessage":{ "type":"structure", "required":["ReplicationConfigArn"], @@ -5177,6 +6689,28 @@ }, "documentation":"

Provides information that defines a MySQL endpoint.

" }, + "MySqlDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{ + "shape":"String", + "documentation":"

The name of the MySQL server.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port value for the MySQL data provider.

" + }, + "SslMode":{ + "shape":"DmsSslModeValue", + "documentation":"

The SSL mode used to connect to the MySQL data provider. The default value is none.

" + }, + "CertificateArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the certificate used for SSL connection.

" + } + }, + "documentation":"

Provides information that defines a MySQL data provider.

" + }, "NeptuneSettings":{ "type":"structure", "required":[ @@ -5222,6 +6756,52 @@ "one" ] }, + "OracleDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{ + "shape":"String", + "documentation":"

The name of the Oracle server.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port value for the Oracle data provider.

" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"

The database name on the Oracle data provider.

" + }, + "SslMode":{ + "shape":"DmsSslModeValue", + "documentation":"

The SSL mode used to connect to the Oracle data provider. The default value is none.

" + }, + "CertificateArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the certificate used for SSL connection.

" + }, + "AsmServer":{ + "shape":"String", + "documentation":"

The address of your Oracle Automatic Storage Management (ASM) server. You can set this value from the asm_server value. You set asm_server as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see Configuration for change data capture (CDC) on an Oracle source database.

" + }, + "SecretsManagerOracleAsmSecretId":{ + "shape":"String", + "documentation":"

The identifier of the secret in Secrets Manager that contains the Oracle ASM connection details.

Required only if your data provider uses the Oracle ASM server.

" + }, + "SecretsManagerOracleAsmAccessRoleArn":{ + "shape":"String", + "documentation":"

The ARN of the IAM role that provides access to the secret in Secrets Manager that contains the Oracle ASM connection details.

" + }, + "SecretsManagerSecurityDbEncryptionSecretId":{ + "shape":"String", + "documentation":"

The identifier of the secret in Secrets Manager that contains the transparent data encryption (TDE) password. DMS requires this password to access Oracle redo logs encrypted by TDE using Binary Reader.

" + }, + "SecretsManagerSecurityDbEncryptionAccessRoleArn":{ + "shape":"String", + "documentation":"

The ARN of the IAM role that provides access to the secret in Secrets Manager that contains the TDE password.

" + } + }, + "documentation":"

Provides information that defines an Oracle data provider.

" + }, "OracleSettings":{ "type":"structure", "members":{ @@ -5446,6 +7026,13 @@ "type":"list", "member":{"shape":"OrderableReplicationInstance"} }, + "OriginTypeValue":{ + "type":"string", + "enum":[ + "SOURCE", + "TARGET" + ] + }, "ParquetVersionValue":{ "type":"string", "enum":[ @@ -5592,7 +7179,7 @@ }, "DatabaseMode":{ "shape":"DatabaseMode", - "documentation":"

Specifies whether to use default or custom replication behavior for PostgreSQL-compatible endpoints. You can use this setting to specify replication behavior for endpoints that require additional configuration, such as Babelfish endpoints.

" + "documentation":"

Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.

" }, "BabelfishDatabaseName":{ "shape":"String", @@ -5601,6 +7188,32 @@ }, "documentation":"

Provides information that defines a PostgreSQL endpoint.

" }, + "PostgreSqlDataProviderSettings":{ + "type":"structure", + "members":{ + "ServerName":{ + "shape":"String", + "documentation":"

The name of the PostgreSQL server.

" + }, + "Port":{ + "shape":"IntegerOptional", + "documentation":"

The port value for the PostgreSQL data provider.

" + }, + "DatabaseName":{ + "shape":"String", + "documentation":"

The database name on the PostgreSQL data provider.

" + }, + "SslMode":{ + "shape":"DmsSslModeValue", + "documentation":"

The SSL mode used to connect to the PostgreSQL data provider. The default value is none.

" + }, + "CertificateArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the certificate used for SSL connection.

" + } + }, + "documentation":"

Provides information that defines a PostgreSQL data provider.

" + }, "ProvisionData":{ "type":"structure", "members":{ @@ -7093,6 +8706,20 @@ }, "documentation":"

Settings for exporting data to Amazon S3.

" }, + "SCApplicationAttributes":{ + "type":"structure", + "members":{ + "S3BucketPath":{ + "shape":"String", + "documentation":"

The path for the Amazon S3 bucket that the application uses for exporting assessment reports.

" + }, + "S3BucketRoleArn":{ + "shape":"String", + "documentation":"

The ARN for the role the application uses to access its Amazon S3 bucket.

" + } + }, + "documentation":"

Provides information that defines a schema conversion application.

" + }, "SNSInvalidTopicFault":{ "type":"structure", "members":{ @@ -7123,6 +8750,30 @@ "shared-automatic-truncation" ] }, + "SchemaConversionRequest":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"String", + "documentation":"

The schema conversion action status.

" + }, + "RequestIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the schema conversion action.

" + }, + "MigrationProjectArn":{ + "shape":"String", + "documentation":"

The migration project ARN.

" + }, + "Error":{"shape":"ErrorDetails"}, + "ExportSqlDetails":{"shape":"ExportSqlDetails"} + }, + "documentation":"

Provides information about a schema conversion action.

" + }, + "SchemaConversionRequestList":{ + "type":"list", + "member":{"shape":"SchemaConversionRequest"} + }, "SchemaList":{ "type":"list", "member":{"shape":"String"} @@ -7229,6 +8880,177 @@ "ssl-encryption" ] }, + "StartExtensionPackAssociationMessage":{ + "type":"structure", + "required":["MigrationProjectIdentifier"], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + } + } + }, + "StartExtensionPackAssociationResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the request operation.

" + } + } + }, + "StartMetadataModelAssessmentMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"

A value that specifies the database objects to assess.

" + } + } + }, + "StartMetadataModelAssessmentResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the assessment operation.

" + } + } + }, + "StartMetadataModelConversionMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"

A value that specifies the database objects to convert.

" + } + } + }, + "StartMetadataModelConversionResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the conversion operation.

" + } + } + }, + "StartMetadataModelExportAsScriptMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules", + "Origin" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"

A value that specifies the database objects to export.

" + }, + "Origin":{ + "shape":"OriginTypeValue", + "documentation":"

Whether to export the metadata model from the source or the target.

" + }, + "FileName":{ + "shape":"String", + "documentation":"

The name of the model file to create in the Amazon S3 bucket.

" + } + } + }, + "StartMetadataModelExportAsScriptResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the export operation.

" + } + } + }, + "StartMetadataModelExportToTargetMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"

A value that specifies the database objects to export.

" + }, + "OverwriteExtensionPack":{ + "shape":"BooleanOptional", + "documentation":"

Whether to overwrite the migration project extension pack. An extension pack is an add-on module that emulates functions present in a source database that are required when converting objects to the target database.

" + } + } + }, + "StartMetadataModelExportToTargetResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the export operation.

" + } + } + }, + "StartMetadataModelImportMessage":{ + "type":"structure", + "required":[ + "MigrationProjectIdentifier", + "SelectionRules", + "Origin" + ], + "members":{ + "MigrationProjectIdentifier":{ + "shape":"String", + "documentation":"

The migration project name or Amazon Resource Name (ARN).

" + }, + "SelectionRules":{ + "shape":"String", + "documentation":"

A value that specifies the database objects to import.

" + }, + "Origin":{ + "shape":"OriginTypeValue", + "documentation":"

Whether to load metadata to the source or target database.

" + }, + "Refresh":{ + "shape":"Boolean", + "documentation":"

If true, DMS loads metadata for the specified objects from the source database.

" + } + } + }, + "StartMetadataModelImportResponse":{ + "type":"structure", + "members":{ + "RequestIdentifier":{ + "shape":"String", + "documentation":"

The identifier for the import operation.

" + } + } + }, "StartRecommendationsRequest":{ "type":"structure", "required":[ diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index cafd4834e6e..91a9ed29312 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index 40006e968f7..a96df35d8c9 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index 5468664106d..f17c62eba44 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index dcf9efb5f3f..8ba289c3d00 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/datasync/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/datasync/src/main/resources/codegen-resources/endpoint-rule-set.json index d1fda177e76..0d32931aa5a 100644 --- a/services/datasync/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/datasync/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - 
"error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://datasync-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://datasync-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://datasync-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://datasync-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": 
"UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://datasync.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://datasync.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://datasync.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://datasync.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/datasync/src/main/resources/codegen-resources/service-2.json b/services/datasync/src/main/resources/codegen-resources/service-2.json index 26461af8c8c..1260e47922d 100644 --- a/services/datasync/src/main/resources/codegen-resources/service-2.json +++ 
b/services/datasync/src/main/resources/codegen-resources/service-2.json @@ -57,6 +57,20 @@ ], "documentation":"

Activates a DataSync agent that you've deployed in your storage environment. The activation process associates the agent with your Amazon Web Services account.

If you haven't deployed an agent yet, see the following topics to learn more:

If you're transferring between Amazon Web Services storage services, you don't need a DataSync agent.

" }, + "CreateLocationAzureBlob":{ + "name":"CreateLocationAzureBlob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLocationAzureBlobRequest"}, + "output":{"shape":"CreateLocationAzureBlobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Creates an endpoint for a Microsoft Azure Blob Storage container that DataSync can use as a transfer source or destination.

Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also need a DataSync agent that can connect to your container.

" + }, "CreateLocationEfs":{ "name":"CreateLocationEfs", "http":{ @@ -153,7 +167,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Defines a file system on a Network File System (NFS) server that can be read from or written to.

" + "documentation":"

Creates an endpoint for a Network File System (NFS) file server that DataSync can use for a data transfer.

For more information, see Configuring transfers to or from an NFS file server.

If you're copying data to or from a Snowcone device, you can also use CreateLocationNfs to create your transfer location. For more information, see Configuring transfers with Snowcone.

" }, "CreateLocationObjectStorage":{ "name":"CreateLocationObjectStorage", @@ -195,7 +209,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for a Server Message Block (SMB) file server that DataSync can access for a transfer. For more information, see Creating an SMB location.

" + "documentation":"

Creates an endpoint for a Server Message Block (SMB) file server that DataSync can use for a data transfer.

Before you begin, make sure that you understand how DataSync accesses an SMB file server.

" }, "CreateTask":{ "name":"CreateTask", @@ -282,6 +296,20 @@ "documentation":"

Returns information about a DataSync discovery job.

", "endpoint":{"hostPrefix":"discovery-"} }, + "DescribeLocationAzureBlob":{ + "name":"DescribeLocationAzureBlob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLocationAzureBlobRequest"}, + "output":{"shape":"DescribeLocationAzureBlobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Provides details about how a DataSync transfer location for Microsoft Azure Blob Storage is configured.

" + }, "DescribeLocationEfs":{ "name":"DescribeLocationEfs", "http":{ @@ -378,7 +406,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns metadata, such as the path information, about an NFS location.

" + "documentation":"

Provides details about how a DataSync transfer location for a Network File System (NFS) file server is configured.

" }, "DescribeLocationObjectStorage":{ "name":"DescribeLocationObjectStorage", @@ -479,7 +507,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns metadata about a task.

" + "documentation":"

Provides information about a DataSync transfer task.

" }, "DescribeTaskExecution":{ "name":"DescribeTaskExecution", @@ -493,7 +521,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns detailed metadata about a task that is being executed.

" + "documentation":"

Provides information about a DataSync transfer task that's running.

" }, "GenerateRecommendations":{ "name":"GenerateRecommendations", @@ -726,6 +754,20 @@ "documentation":"

Edits a DataSync discovery job configuration.

", "endpoint":{"hostPrefix":"discovery-"} }, + "UpdateLocationAzureBlob":{ + "name":"UpdateLocationAzureBlob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationAzureBlobRequest"}, + "output":{"shape":"UpdateLocationAzureBlobResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.

" + }, "UpdateLocationHdfs":{ "name":"UpdateLocationHdfs", "http":{ @@ -752,7 +794,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Updates some of the parameters of a previously created location for Network File System (NFS) access. For information about creating an NFS location, see Creating a location for NFS.

" + "documentation":"

Modifies some configurations of the Network File System (NFS) transfer location that you're using with DataSync.

For more information, see Configuring transfers to or from an NFS file server.

" }, "UpdateLocationObjectStorage":{ "name":"UpdateLocationObjectStorage", @@ -852,7 +894,7 @@ }, "AgentArns":{ "shape":"DiscoveryAgentArnList", - "documentation":"

Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface. You can only specify one ARN.

" }, "CloudWatchLogGroupArn":{ "shape":"LogGroupArn", @@ -934,6 +976,50 @@ "BEST_EFFORT" ] }, + "AzureAccessTier":{ + "type":"string", + "enum":[ + "HOT", + "COOL", + "ARCHIVE" + ] + }, + "AzureBlobAuthenticationType":{ + "type":"string", + "enum":["SAS"] + }, + "AzureBlobContainerUrl":{ + "type":"string", + "max":325, + "pattern":"^https:\\/\\/[A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252}\\/[a-z0-9](-?[a-z0-9]){2,62}$" + }, + "AzureBlobSasConfiguration":{ + "type":"structure", + "required":["Token"], + "members":{ + "Token":{ + "shape":"AzureBlobSasToken", + "documentation":"

Specifies a SAS token that provides permissions at the Azure storage account, container, or folder level.

The token is part of the SAS URI string that comes after the storage resource URI and a question mark. A token looks something like this:

sp=r&st=2023-12-20T14:54:52Z&se=2023-12-20T22:54:52Z&spr=https&sv=2021-06-08&sr=c&sig=aBBKDWQvyuVcTPH9EBp%2FXTI9E%2F%2Fmq171%2BZU178wcwqU%3D

" + } + }, + "documentation":"

The shared access signature (SAS) configuration that allows DataSync to access your Microsoft Azure Blob Storage.

For more information, see SAS tokens for accessing your Azure Blob Storage.

" + }, + "AzureBlobSasToken":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^.+$", + "sensitive":true + }, + "AzureBlobSubdirectory":{ + "type":"string", + "max":1024, + "pattern":"^[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}\\p{C}]*$" + }, + "AzureBlobType":{ + "type":"string", + "enum":["BLOCK"] + }, "BytesPerSecond":{ "type":"long", "min":-1 @@ -968,6 +1054,10 @@ "LogicalUsed":{ "shape":"NonNegativeLong", "documentation":"

The amount of space that's being used in a storage system resource without accounting for compression or deduplication.

" + }, + "ClusterCloudStorageUsed":{ + "shape":"NonNegativeLong", + "documentation":"

The amount of space in the cluster that's in cloud storage (for example, if you're using data tiering).

" } }, "documentation":"

The storage capacity of an on-premises storage system resource (for example, a volume).

" @@ -999,11 +1089,11 @@ }, "SubnetArns":{ "shape":"PLSubnetArnList", - "documentation":"

Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer.

" + "documentation":"

Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer. You can only specify one ARN.

" }, "SecurityGroupArns":{ "shape":"PLSecurityGroupArnList", - "documentation":"

Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint. You can only specify one ARN.

" } }, "documentation":"

CreateAgentRequest

" @@ -1018,6 +1108,57 @@ }, "documentation":"

CreateAgentResponse

" }, + "CreateLocationAzureBlobRequest":{ + "type":"structure", + "required":[ + "ContainerUrl", + "AuthenticationType", + "AgentArns" + ], + "members":{ + "ContainerUrl":{ + "shape":"AzureBlobContainerUrl", + "documentation":"

Specifies the URL of the Azure Blob Storage container involved in your transfer.

" + }, + "AuthenticationType":{ + "shape":"AzureBlobAuthenticationType", + "documentation":"

Specifies the authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).

" + }, + "SasConfiguration":{ + "shape":"AzureBlobSasConfiguration", + "documentation":"

Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.

" + }, + "BlobType":{ + "shape":"AzureBlobType", + "documentation":"

Specifies the type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.

" + }, + "AccessTier":{ + "shape":"AzureAccessTier", + "documentation":"

Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.

" + }, + "Subdirectory":{ + "shape":"AzureBlobSubdirectory", + "documentation":"

Specifies path segments if you want to limit your transfer to a virtual directory in your container (for example, /my/images).

" + }, + "AgentArns":{ + "shape":"AgentArnList", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.

You can specify more than one agent. For more information, see Using multiple agents for your transfer.

" + }, + "Tags":{ + "shape":"InputTagList", + "documentation":"

Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your transfer location.

" + } + } + }, + "CreateLocationAzureBlobResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The ARN of the Azure Blob Storage transfer location that you created.

" + } + } + }, "CreateLocationEfsRequest":{ "type":"structure", "required":[ @@ -1304,23 +1445,23 @@ "members":{ "Subdirectory":{ "shape":"NfsSubdirectory", - "documentation":"

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

" + "documentation":"

Specifies the export path in your NFS file server that you want DataSync to mount.

This path (or a subdirectory of the path) is where DataSync transfers data to or from. For information on configuring an export for DataSync, see Accessing NFS file servers.

" }, "ServerHostname":{ "shape":"ServerHostname", - "documentation":"

The name of the NFS server. This value is the IP address or Domain Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this hostname to mount the NFS server in a network.

If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.

This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.

" + "documentation":"

Specifies the Domain Name System (DNS) name or IP version 4 address of the NFS file server that your DataSync agent connects to.

" }, "OnPremConfig":{ "shape":"OnPremConfig", - "documentation":"

Contains a list of Amazon Resource Names (ARNs) of agents that are used to connect to an NFS server.

If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the DataSync agent that you want to connect to your NFS file server.

You can specify more than one agent. For more information, see Using multiple agents for transfers.

" }, "MountOptions":{ "shape":"NfsMountOptions", - "documentation":"

The NFS mount options that DataSync can use to mount your NFS share.

" + "documentation":"

Specifies the options that DataSync can use to mount your NFS file server.

" }, "Tags":{ "shape":"InputTagList", - "documentation":"

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

" + "documentation":"

Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.

" } }, "documentation":"

CreateLocationNfsRequest

" @@ -1330,7 +1471,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the source NFS file system location that is created.

" + "documentation":"

The ARN of the transfer location that you created for your NFS file server.

" } }, "documentation":"

CreateLocationNfsResponse

" @@ -1698,6 +1839,49 @@ } } }, + "DescribeLocationAzureBlobRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

Specifies the Amazon Resource Name (ARN) of your Azure Blob Storage transfer location.

" + } + } + }, + "DescribeLocationAzureBlobResponse":{ + "type":"structure", + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

The ARN of your Azure Blob Storage transfer location.

" + }, + "LocationUri":{ + "shape":"LocationUri", + "documentation":"

The URL of the Azure Blob Storage container involved in your transfer.

" + }, + "AuthenticationType":{ + "shape":"AzureBlobAuthenticationType", + "documentation":"

The authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).

" + }, + "BlobType":{ + "shape":"AzureBlobType", + "documentation":"

The type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.

" + }, + "AccessTier":{ + "shape":"AzureAccessTier", + "documentation":"

The access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.

" + }, + "AgentArns":{ + "shape":"AgentArnList", + "documentation":"

The ARNs of the DataSync agents that can connect with your Azure Blob Storage container.

" + }, + "CreationTime":{ + "shape":"Time", + "documentation":"

The time that your Azure Blob Storage transfer location was created.

" + } + } + }, "DescribeLocationEfsRequest":{ "type":"structure", "required":["LocationArn"], @@ -1954,7 +2138,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the NFS location to describe.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the NFS location that you want information about.

" } }, "documentation":"

DescribeLocationNfsRequest

" @@ -1964,20 +2148,20 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the NFS location that was described.

" + "documentation":"

The ARN of the NFS location.

" }, "LocationUri":{ "shape":"LocationUri", - "documentation":"

The URL of the source NFS location that was described.

" + "documentation":"

The URL of the NFS location.

" }, "OnPremConfig":{"shape":"OnPremConfig"}, "MountOptions":{ "shape":"NfsMountOptions", - "documentation":"

The NFS mount options that DataSync used to mount your NFS share.

" + "documentation":"

The mount options that DataSync uses to mount your NFS file server.

" }, "CreationTime":{ "shape":"Time", - "documentation":"

The time that the NFS location was created.

" + "documentation":"

The time when the NFS location was created.

" } }, "documentation":"

DescribeLocationNfsResponse

" @@ -2272,7 +2456,7 @@ "members":{ "TaskExecutionArn":{ "shape":"TaskExecutionArn", - "documentation":"

The Amazon Resource Name (ARN) of the task that is being executed.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the transfer task that's running.

" } }, "documentation":"

DescribeTaskExecutionRequest

" @@ -2338,7 +2522,7 @@ "members":{ "TaskArn":{ "shape":"TaskArn", - "documentation":"

The Amazon Resource Name (ARN) of the task to describe.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the transfer task.

" } }, "documentation":"

DescribeTaskRequest

" @@ -3325,6 +3509,10 @@ "LunCount":{ "shape":"NonNegativeLong", "documentation":"

The number of LUNs (logical unit numbers) in the cluster.

" + }, + "ClusterCloudStorageUsed":{ + "shape":"NonNegativeLong", + "documentation":"

The amount of space in the cluster that's in cloud storage (for example, if you're using data tiering).

" } }, "documentation":"

The information that DataSync Discovery collects about an on-premises storage system cluster.

" @@ -3562,10 +3750,10 @@ "members":{ "AgentArns":{ "shape":"AgentArnList", - "documentation":"

ARNs of the agents to use for an NFS location.

" + "documentation":"

The Amazon Resource Names (ARNs) of the agents connecting to a transfer location.

" } }, - "documentation":"

A list of Amazon Resource Names (ARNs) of agents to use for a Network File System (NFS) location.

" + "documentation":"

The DataSync agents that are connecting to a Network File System (NFS) location.

" }, "Operator":{ "type":"string", @@ -3725,22 +3913,22 @@ "members":{ "VpcEndpointId":{ "shape":"VpcEndpointId", - "documentation":"

The ID of the VPC endpoint that is configured for an agent. An agent that is configured with a VPC endpoint will not be accessible over the public internet.

" + "documentation":"

Specifies the ID of the VPC endpoint that your agent connects to.

" }, "PrivateLinkEndpoint":{ "shape":"Endpoint", - "documentation":"

The private endpoint that is configured for an agent that has access to IP addresses in a PrivateLink. An agent that is configured with this endpoint will not be accessible over the public internet.

" + "documentation":"

Specifies the VPC endpoint provided by Amazon Web Services PrivateLink that your agent connects to.

" }, "SubnetArns":{ "shape":"PLSubnetArnList", - "documentation":"

The Amazon Resource Names (ARNs) of the subnets that are configured for an agent activated in a VPC or an agent that has access to a VPC endpoint.

" + "documentation":"

Specifies the ARN of the subnet where your VPC endpoint is located. You can only specify one ARN.

" }, "SecurityGroupArns":{ "shape":"PLSecurityGroupArnList", - "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are configured for the EC2 resource that hosts an agent activated in a VPC or an agent that has access to a VPC endpoint.

" + "documentation":"

Specifies the Amazon Resource Names (ARN) of the security group that provides DataSync access to your VPC endpoint. You can only specify one ARN.

" } }, - "documentation":"

The VPC endpoint, subnet, and security group that an agent uses to access IP addresses in a VPC (Virtual Private Cloud).

" + "documentation":"

Specifies how your DataSync agent connects to Amazon Web Services using a virtual private cloud (VPC) service endpoint. An agent that uses a VPC endpoint isn't accessible over the public internet.

" }, "PtolemyBoolean":{"type":"boolean"}, "PtolemyPassword":{ @@ -4443,6 +4631,45 @@ "members":{ } }, + "UpdateLocationAzureBlobRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

Specifies the ARN of the Azure Blob Storage transfer location that you're updating.

" + }, + "Subdirectory":{ + "shape":"AzureBlobSubdirectory", + "documentation":"

Specifies path segments if you want to limit your transfer to a virtual directory in your container (for example, /my/images).

" + }, + "AuthenticationType":{ + "shape":"AzureBlobAuthenticationType", + "documentation":"

Specifies the authentication method DataSync uses to access your Azure Blob Storage. DataSync can access blob storage using a shared access signature (SAS).

" + }, + "SasConfiguration":{ + "shape":"AzureBlobSasConfiguration", + "documentation":"

Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.

" + }, + "BlobType":{ + "shape":"AzureBlobType", + "documentation":"

Specifies the type of blob that you want your objects or files to be when transferring them into Azure Blob Storage. Currently, DataSync only supports moving data into Azure Blob Storage as block blobs. For more information on blob types, see the Azure Blob Storage documentation.

" + }, + "AccessTier":{ + "shape":"AzureAccessTier", + "documentation":"

Specifies the access tier that you want your objects or files transferred into. This only applies when using the location as a transfer destination. For more information, see Access tiers.

" + }, + "AgentArns":{ + "shape":"AgentArnList", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.

You can specify more than one agent. For more information, see Using multiple agents for your transfer.

" + } + } + }, + "UpdateLocationAzureBlobResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateLocationHdfsRequest":{ "type":"structure", "required":["LocationArn"], @@ -4512,11 +4739,11 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the NFS location to update.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the NFS transfer location that you want to update.

" }, "Subdirectory":{ "shape":"NfsSubdirectory", - "documentation":"

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder that you specified, DataSync must have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the files you want DataSync to access have permissions that allow read access for all users. Doing either option enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

" + "documentation":"

Specifies the export path in your NFS file server that you want DataSync to mount.

This path (or a subdirectory of the path) is where DataSync transfers data to or from. For information on configuring an export for DataSync, see Accessing NFS file servers.

" }, "OnPremConfig":{"shape":"OnPremConfig"}, "MountOptions":{"shape":"NfsMountOptions"} @@ -4620,7 +4847,7 @@ }, "AgentArns":{ "shape":"DiscoveryAgentArnList", - "documentation":"

Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads your on-premises storage system.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads your on-premises storage system. You can only specify one ARN.

" }, "Name":{ "shape":"Name", diff --git a/services/dax/pom.xml b/services/dax/pom.xml index 549ab9a2181..e60dd7a979e 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index 86c2440d7df..d0a03d0cbe5 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/detective/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/detective/src/main/resources/codegen-resources/endpoint-rule-set.json index 7227bac202c..fa378ddc6d0 100644 --- a/services/detective/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/detective/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": 
"isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://api.detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + 
"argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://api.detective-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://api.detective-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.detective.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + 
"supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://api.detective.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://api.detective.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://api.detective.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/detective/src/main/resources/codegen-resources/service-2.json b/services/detective/src/main/resources/codegen-resources/service-2.json index 62607e86f39..297e16c60ec 100644 --- a/services/detective/src/main/resources/codegen-resources/service-2.json +++ b/services/detective/src/main/resources/codegen-resources/service-2.json @@ -754,7 +754,7 @@ "type":"string", "max":64, "min":1, - "pattern":"^.+@(?:(?:(?!-)[A-Za-z0-9-]{1,62})?[A-Za-z0-9]{1}\\.)+[A-Za-z]{2,6}$" + "pattern":"^.+@(?:(?:(?!-)[A-Za-z0-9-]{1,62})?[A-Za-z0-9]{1}\\.)+[A-Za-z]{2,63}$" }, "EmailMessage":{ "type":"string", diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index a4bf70d05eb..1b5bebe6309 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devopsguru/pom.xml 
b/services/devopsguru/pom.xml index 8b6cce866ec..6b5866e4e08 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index 71e51720f87..82438467b1c 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directory/pom.xml b/services/directory/pom.xml index 2618b7eb708..d3f0027ee4f 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index f4038e3290b..fd66c0bbc4e 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index 519f68764b0..ea2df4c1a78 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/docdbelastic/pom.xml b/services/docdbelastic/pom.xml index 090f135a951..6a89affe5b6 100644 --- a/services/docdbelastic/pom.xml +++ b/services/docdbelastic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT docdbelastic AWS Java SDK :: Services :: Doc DB Elastic diff --git a/services/drs/pom.xml b/services/drs/pom.xml index ea9d9559200..bb1c4a81a46 100644 --- a/services/drs/pom.xml +++ b/services/drs/pom.xml @@ -21,7 +21,7 
@@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT drs AWS Java SDK :: Services :: Drs diff --git a/services/drs/src/main/resources/codegen-resources/service-2.json b/services/drs/src/main/resources/codegen-resources/service-2.json index db8d44dd9fe..8da10f76c5c 100644 --- a/services/drs/src/main/resources/codegen-resources/service-2.json +++ b/services/drs/src/main/resources/codegen-resources/service-2.json @@ -4050,7 +4050,8 @@ "type":"string", "enum":[ "NONE", - "BASIC" + "BASIC", + "IN_AWS" ] }, "TerminateRecoveryInstancesRequest":{ diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index e47b1ab1dbb..a8bb59e1249 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json index c8a697c1553..78bc5ce6f4f 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json @@ -692,7 +692,7 @@ {"shape":"RequestLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

A single Scan operation reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.

", + "documentation":"

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.

If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to continue the scan in a subsequent operation. Each scan response also includes number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan result can result in no items meeting the criteria and the Count will result in zero. If you did not use a FilterExpression in the scan request, then Count is the same as ScannedCount.

Count and ScannedCount only return the count of items specific to a single scan request and, unless the table is less than 1MB, do not represent the total number of items in the table.

A single Scan operation first reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.

Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

By default, a Scan uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead parameter to true. Strong consistency only relates to the consistency of the read at the item level.

DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested.

", "endpointdiscovery":{ } }, @@ -1378,7 +1378,7 @@ "members":{ "Responses":{ "shape":"PartiQLBatchResponse", - "documentation":"

The response to each PartiQL statement in the batch.

" + "documentation":"

The response to each PartiQL statement in the batch. The values of the list are ordered according to the ordering of the request statements.

" }, "ConsumedCapacity":{ "shape":"ConsumedCapacityMultiple", @@ -5938,7 +5938,7 @@ }, "ReturnValuesOnConditionCheckFailure":{ "shape":"ReturnValuesOnConditionCheckFailure", - "documentation":"

Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW.

" + "documentation":"

Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid values are: NONE and ALL_OLD.

" } }, "documentation":"

Represents a request to perform an UpdateItem operation.

" diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index 50620bb61f9..fcc647fe8fa 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ebs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/ebs/src/main/resources/codegen-resources/endpoint-rule-set.json index 66091be73dc..5a6ff8bebb8 100644 --- a/services/ebs/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/ebs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + 
"properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ebs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": 
"PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ebs-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ebs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ebs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": 
"https://ebs-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ebs.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ebs.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://ebs.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/ebs/src/main/resources/codegen-resources/endpoint-tests.json b/services/ebs/src/main/resources/codegen-resources/endpoint-tests.json index 91c92af29cd..3d3883d4240 100644 --- a/services/ebs/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/ebs/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,16 +1,632 @@ { "testCases": [ { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.af-south-1.amazonaws.com" + } + }, + "params": { + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region 
ap-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-east-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-northeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-northeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-south-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-southeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-southeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack 
disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ap-southeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-central-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-north-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-south-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-west-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://ebs.eu-west-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.eu-west-3.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.me-south-1.amazonaws.com" + } + }, + "params": { + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.sa-east-1.amazonaws.com" + } + }, + "params": { + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": true, + 
"UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + 
"expect": { + "endpoint": { + "url": "https://ebs.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://ebs-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but 
this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ebs-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -20,9 +636,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -32,11 +648,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git 
a/services/ebs/src/main/resources/codegen-resources/service-2.json b/services/ebs/src/main/resources/codegen-resources/service-2.json index bea4106a85f..839422e2c6c 100644 --- a/services/ebs/src/main/resources/codegen-resources/service-2.json +++ b/services/ebs/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

Seals and completes the snapshot after all of the required blocks of data have been written to it. Completing the snapshot changes the status to completed. You cannot write new blocks to a snapshot after it has been completed.

" + "documentation":"

Seals and completes the snapshot after all of the required blocks of data have been written to it. Completing the snapshot changes the status to completed. You cannot write new blocks to a snapshot after it has been completed.

You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.

" }, "GetSnapshotBlock":{ "name":"GetSnapshotBlock", @@ -47,7 +47,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns the data in a block in an Amazon Elastic Block Store snapshot.

" + "documentation":"

Returns the data in a block in an Amazon Elastic Block Store snapshot.

You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.

" }, "ListChangedBlocks":{ "name":"ListChangedBlocks", @@ -65,7 +65,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns information about the blocks that are different between two Amazon Elastic Block Store snapshots of the same volume/snapshot lineage.

" + "documentation":"

Returns information about the blocks that are different between two Amazon Elastic Block Store snapshots of the same volume/snapshot lineage.

You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.

" }, "ListSnapshotBlocks":{ "name":"ListSnapshotBlocks", @@ -83,7 +83,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns information about the blocks in an Amazon Elastic Block Store snapshot.

" + "documentation":"

Returns information about the blocks in an Amazon Elastic Block Store snapshot.

You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.

" }, "PutSnapshotBlock":{ "name":"PutSnapshotBlock", @@ -102,7 +102,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], - "documentation":"

Writes a block of data to a snapshot. If the specified block contains data, the existing data is overwritten. The target snapshot must be in the pending state.

Data written to a snapshot must be aligned with 512-KiB sectors.

", + "documentation":"

Writes a block of data to a snapshot. If the specified block contains data, the existing data is overwritten. The target snapshot must be in the pending state.

Data written to a snapshot must be aligned with 512-KiB sectors.

You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.

", "authtype":"v4-unsigned-body" }, "StartSnapshot":{ @@ -124,7 +124,7 @@ {"shape":"ConcurrentLimitExceededException"}, {"shape":"ConflictException"} ], - "documentation":"

Creates a new Amazon EBS snapshot. The new snapshot enters the pending state after the request completes.

After creating the snapshot, use PutSnapshotBlock to write blocks of data to the snapshot.

" + "documentation":"

Creates a new Amazon EBS snapshot. The new snapshot enters the pending state after the request completes.

After creating the snapshot, use PutSnapshotBlock to write blocks of data to the snapshot.

You should always retry requests that receive server (5xx) error responses, and ThrottlingException and RequestThrottledException client error responses. For more information see Error retries in the Amazon Elastic Compute Cloud User Guide.

" } }, "shapes":{ @@ -370,7 +370,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

An internal error has occurred.

", + "documentation":"

An internal error has occurred. For more information see Error retries.

", "error":{"httpStatusCode":500}, "exception":true, "fault":true @@ -599,7 +599,7 @@ "documentation":"

The reason for the exception.

" } }, - "documentation":"

The number of API requests has exceed the maximum allowed API request throttling limit.

", + "documentation":"

The number of API requests has exceeded the maximum allowed API request throttling limit for the snapshot. For more information see Error retries.

", "error":{"httpStatusCode":400}, "exception":true }, @@ -628,7 +628,17 @@ "type":"string", "enum":[ "SNAPSHOT_NOT_FOUND", - "DEPENDENCY_RESOURCE_NOT_FOUND" + "GRANT_NOT_FOUND", + "DEPENDENCY_RESOURCE_NOT_FOUND", + "IMAGE_NOT_FOUND" + ] + }, + "SSEType":{ + "type":"string", + "enum":[ + "sse-ebs", + "sse-kms", + "none" ] }, "ServiceQuotaExceededException":{ @@ -735,6 +745,10 @@ "KmsKeyArn":{ "shape":"KmsKeyArn", "documentation":"

The Amazon Resource Name (ARN) of the Key Management Service (KMS) key used to encrypt the snapshot.

" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"

Reserved for future use.

" } } }, @@ -800,6 +814,7 @@ "INVALID_CUSTOMER_KEY", "INVALID_PAGE_TOKEN", "INVALID_BLOCK_TOKEN", + "INVALID_GRANT_TOKEN", "INVALID_SNAPSHOT_ID", "UNRELATED_SNAPSHOTS", "INVALID_BLOCK", @@ -808,7 +823,9 @@ "INVALID_DEPENDENCY_REQUEST", "INVALID_PARAMETER_VALUE", "INVALID_VOLUME_SIZE", - "CONFLICTING_BLOCK_UPDATE" + "CONFLICTING_BLOCK_UPDATE", + "INVALID_IMAGE_ID", + "WRITE_REQUEST_TIMEOUT" ] }, "VolumeSize":{ diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index 8d281a90f9c..d40cb3d9c82 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/ec2/src/main/resources/codegen-resources/endpoint-rule-set.json index bdc98a72139..287f71554ec 100644 --- a/services/ec2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/ec2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { 
"conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ec2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://ec2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + 
"argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://ec2.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://ec2-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://ec2.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://ec2-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - 
"argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ec2.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://ec2.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ec2.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://ec2.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 6c6b1eade17..330d367716e 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -8950,6 +8950,7 @@ ] }, "BareMetalFlag":{"type":"boolean"}, + "BaselineBandwidthInGbps":{"type":"double"}, "BaselineBandwidthInMbps":{"type":"integer"}, "BaselineEbsBandwidthMbps":{ "type":"structure", @@ -13499,6 +13500,10 @@ "shape":"String", "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", "idempotencyToken":true + }, + "EnablePrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

If you’re creating a network interface in a dual-stack or IPv6-only subnet, you have the option to assign a primary IPv6 IP address. A primary IPv6 address is an IPv6 GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use this option if the instance that this ENI will be attached to relies on its IPv6 address not changing. Amazon Web Services will automatically assign an IPv6 address associated with the ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. If you have multiple IPv6 addresses associated with an ENI attached to your instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 address.

" } } }, @@ -25991,7 +25996,7 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"

The ARN of the Outpost on which the snapshot is stored.

This parameter is only supported on BlockDeviceMapping objects called by CreateImage.

", + "documentation":"

The ARN of the Outpost on which the snapshot is stored.

This parameter is not supported when using CreateImage.

", "locationName":"outpostArn" }, "Encrypted":{ @@ -29399,6 +29404,11 @@ "shape":"Boolean", "documentation":"

Indicates whether encryption by default is enabled.

", "locationName":"ebsEncryptionByDefault" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"

Reserved for future use.

", + "locationName":"sseType" } } }, @@ -32633,6 +32643,11 @@ "shape":"InferenceDeviceInfoList", "documentation":"

Describes the Inference accelerators for the instance type.

", "locationName":"accelerators" + }, + "TotalInferenceMemoryInMiB":{ + "shape":"totalInferenceMemory", + "documentation":"

The total size of the memory for the inference accelerators for the instance type, in MiB.

", + "locationName":"totalInferenceMemoryInMiB" } }, "documentation":"

Describes the Inference accelerators for the instance type.

" @@ -32655,6 +32670,11 @@ "shape":"InferenceDeviceManufacturerName", "documentation":"

The manufacturer of the Inference accelerator.

", "locationName":"manufacturer" + }, + "MemoryInfo":{ + "shape":"InferenceDeviceMemoryInfo", + "documentation":"

Describes the memory available to the inference accelerator.

", + "locationName":"memoryInfo" } }, "documentation":"

Describes the Inference accelerators for the instance type.

" @@ -32665,6 +32685,18 @@ "locationName":"item" }, "InferenceDeviceManufacturerName":{"type":"string"}, + "InferenceDeviceMemoryInfo":{ + "type":"structure", + "members":{ + "SizeInMiB":{ + "shape":"InferenceDeviceMemorySize", + "documentation":"

The size of the memory available to the inference accelerator, in MiB.

", + "locationName":"sizeInMiB" + } + }, + "documentation":"

Describes the memory available to the inference accelerator.

" + }, + "InferenceDeviceMemorySize":{"type":"integer"}, "InferenceDeviceName":{"type":"string"}, "InsideCidrBlocksStringList":{ "type":"list", @@ -33570,6 +33602,11 @@ "shape":"String", "documentation":"

The IPv6 address.

", "locationName":"ipv6Address" + }, + "IsPrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information, see RunInstances.

", + "locationName":"isPrimaryIpv6" } }, "documentation":"

Describes an IPv6 address.

" @@ -34056,6 +34093,10 @@ "Ipv6PrefixCount":{ "shape":"Integer", "documentation":"

The number of IPv6 delegated prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv6Prefix option.

" + }, + "PrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

The primary IPv6 address of the network interface. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information about primary IPv6 addresses, see RunInstances.

" } }, "documentation":"

Describes a network interface.

" @@ -35285,7 +35326,22 @@ "c7gn.4xlarge", "c7gn.8xlarge", "c7gn.12xlarge", - "c7gn.16xlarge" + "c7gn.16xlarge", + "p5.48xlarge", + "m7i.large", + "m7i.xlarge", + "m7i.2xlarge", + "m7i.4xlarge", + "m7i.8xlarge", + "m7i.12xlarge", + "m7i.16xlarge", + "m7i.24xlarge", + "m7i.48xlarge", + "m7i-flex.large", + "m7i-flex.xlarge", + "m7i-flex.2xlarge", + "m7i-flex.4xlarge", + "m7i-flex.8xlarge" ] }, "InstanceTypeHypervisor":{ @@ -37971,6 +38027,11 @@ "shape":"Integer", "documentation":"

The number of IPv6 prefixes that Amazon Web Services automatically assigned to the network interface.

", "locationName":"ipv6PrefixCount" + }, + "PrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

The primary IPv6 address of the network interface. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information about primary IPv6 addresses, see RunInstances.

", + "locationName":"primaryIpv6" } }, "documentation":"

Describes a network interface.

" @@ -38063,6 +38124,10 @@ "Ipv6PrefixCount":{ "shape":"Integer", "documentation":"

The number of IPv6 prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv6Prefix option.

" + }, + "PrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

The primary IPv6 address of the network interface. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information about primary IPv6 addresses, see RunInstances.

" } }, "documentation":"

The parameters for a network interface.

" @@ -40677,6 +40742,10 @@ "EnaSrdSpecification":{ "shape":"EnaSrdSpecification", "documentation":"

Updates the ENA Express configuration for the network interface that’s attached to the instance.

" + }, + "EnablePrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

If you’re modifying a network interface in a dual-stack or IPv6-only subnet, you have the option to assign a primary IPv6 IP address. A primary IPv6 address is an IPv6 GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use this option if the instance that this ENI will be attached to relies on its IPv6 address not changing. Amazon Web Services will automatically assign an IPv6 address associated with the ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. If you have multiple IPv6 addresses associated with an ENI attached to your instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 address.

" } }, "documentation":"

Contains the parameters for ModifyNetworkInterfaceAttribute.

" @@ -42754,6 +42823,16 @@ "shape":"MaxNetworkInterfaces", "documentation":"

The maximum number of network interfaces for the network card.

", "locationName":"maximumNetworkInterfaces" + }, + "BaselineBandwidthInGbps":{ + "shape":"BaselineBandwidthInGbps", + "documentation":"

The baseline network performance of the network card, in Gbps.

", + "locationName":"baselineBandwidthInGbps" + }, + "PeakBandwidthInGbps":{ + "shape":"PeakBandwidthInGbps", + "documentation":"

The peak (burst) network performance of the network card, in Gbps.

", + "locationName":"peakBandwidthInGbps" } }, "documentation":"

Describes the network card support of the instance type.

" @@ -43491,6 +43570,11 @@ "shape":"String", "documentation":"

The IPv6 address.

", "locationName":"ipv6Address" + }, + "IsPrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information, see ModifyNetworkInterfaceAttribute.

", + "locationName":"isPrimaryIpv6" } }, "documentation":"

Describes an IPv6 address associated with a network interface.

" @@ -44197,6 +44281,7 @@ }, "documentation":"

Describes the data that identifies an Amazon FPGA image (AFI) on the PCI bus.

" }, + "PeakBandwidthInGbps":{"type":"double"}, "PeeringAttachmentStatus":{ "type":"structure", "members":{ @@ -48456,6 +48541,11 @@ "shape":"Integer", "documentation":"

The size of the volume, in GiB.

", "locationName":"volumeSize" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"

Reserved for future use.

", + "locationName":"sseType" } } }, @@ -49209,6 +49299,10 @@ "DisableApiStop":{ "shape":"Boolean", "documentation":"

Indicates whether an instance is enabled for stop protection. For more information, see Stop protection.

" + }, + "EnablePrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

If you’re launching an instance into a dual-stack or IPv6-only subnet, you can enable assigning a primary IPv6 address. A primary IPv6 address is an IPv6 GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use this option if an instance relies on its IPv6 address not changing. When you launch the instance, Amazon Web Services will automatically assign an IPv6 address associated with the ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. If you have multiple IPv6 addresses associated with an ENI attached to your instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 address.

" } } }, @@ -49309,6 +49403,14 @@ }, "documentation":"

Describes the storage parameters for Amazon S3 and Amazon S3 buckets for an instance store-backed AMI.

" }, + "SSEType":{ + "type":"string", + "enum":[ + "sse-ebs", + "sse-kms", + "none" + ] + }, "ScheduledInstance":{ "type":"structure", "members":{ @@ -50609,6 +50711,11 @@ "shape":"MillisecondDateTime", "documentation":"

Only for archived snapshots that are temporarily restored. Indicates the date and time when a temporarily restored snapshot will be automatically re-archived.

", "locationName":"restoreExpiryTime" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"

Reserved for future use.

", + "locationName":"sseType" } }, "documentation":"

Describes a snapshot.

" @@ -50770,6 +50877,11 @@ "shape":"String", "documentation":"

The ARN of the Outpost on which the snapshot is stored. For more information, see Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

", "locationName":"outpostArn" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"

Reserved for future use.

", + "locationName":"sseType" } }, "documentation":"

Information about a snapshot.

" @@ -51477,7 +51589,8 @@ "active", "closed", "cancelled", - "failed" + "failed", + "disabled" ] }, "SpotInstanceStateFault":{ @@ -56630,6 +56743,11 @@ "shape":"Integer", "documentation":"

The throughput that the volume supports, in MiB/s.

", "locationName":"throughput" + }, + "SseType":{ + "shape":"SSEType", + "documentation":"

Reserved for future use.

", + "locationName":"sseType" } }, "documentation":"

Describes a volume.

" @@ -58192,7 +58310,8 @@ } }, "totalFpgaMemory":{"type":"integer"}, - "totalGpuMemory":{"type":"integer"} + "totalGpuMemory":{"type":"integer"}, + "totalInferenceMemory":{"type":"integer"} }, "documentation":"Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the Amazon Web Services Cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically isolated section of the Amazon Web Services Cloud where you can launch Amazon Web Services resources in a virtual network that you've defined. Amazon Elastic Block Store (Amazon EBS) provides block level storage volumes for use with EC2 instances. EBS volumes are highly available and reliable storage volumes that can be attached to any running instance and used like a hard drive.

To learn more, see the following resources:

" } diff --git a/services/ec2/src/main/resources/codegen-resources/waiters-2.json b/services/ec2/src/main/resources/codegen-resources/waiters-2.json index 4f73e720fcc..e890388e73b 100644 --- a/services/ec2/src/main/resources/codegen-resources/waiters-2.json +++ b/services/ec2/src/main/resources/codegen-resources/waiters-2.json @@ -506,6 +506,31 @@ } ] }, + "StoreImageTaskComplete": { + "delay": 5, + "operation": "DescribeStoreImageTasks", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "Completed", + "matcher": "pathAll", + "state": "success", + "argument": "StoreImageTaskResults[].StoreTaskState" + }, + { + "expected": "Failed", + "matcher": "pathAny", + "state": "failure", + "argument": "StoreImageTaskResults[].StoreTaskState" + }, + { + "expected": "InProgress", + "matcher": "pathAny", + "state": "retry", + "argument": "StoreImageTaskResults[].StoreTaskState" + } + ] + }, "SubnetAvailable": { "delay": 15, "operation": "DescribeSubnets", diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index a1ad42bcdd1..ef5551eed22 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index 30a678e8ff0..852c1af8473 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index 76f3c0b3e72..14440342a65 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml 
index 7c55aeb0b14..821bac01625 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/ecs/src/main/resources/codegen-resources/endpoint-rule-set.json index 1614858d7cb..57a28815f47 100644 --- a/services/ecs/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/ecs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", 
@@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ecs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://ecs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - 
{ - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://ecs-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://ecs-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ecs.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://ecs.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ecs.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is 
enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://ecs.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index 3c6fb39aea7..6e93278b9dd 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -351,7 +351,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ClusterNotFoundException"} ], - "documentation":"

Describes a specified task or tasks.

Currently, stopped tasks appear in the returned results for at least one hour.

" + "documentation":"

Describes a specified task or tasks.

Currently, stopped tasks appear in the returned results for at least one hour.

If you have tasks with tags, and then delete the cluster, the tagged tasks are returned in the response. If you create a new cluster with the same name as the deleted cluster, the tagged tasks are not included in the response.

" }, "DiscoverPollEndpoint":{ "name":"DiscoverPollEndpoint", @@ -557,7 +557,7 @@ {"shape":"ClusterNotFoundException"}, {"shape":"ServiceNotFoundException"} ], - "documentation":"

Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task.

Recently stopped tasks might appear in the returned results. Currently, stopped tasks appear in the returned results for at least one hour.

" + "documentation":"

Returns a list of tasks. You can filter the results by cluster, task definition family, container instance, launch type, what IAM principal started the task, or by the desired status of the task.

Recently stopped tasks might appear in the returned results.

" }, "PutAccountSetting":{ "name":"PutAccountSetting", @@ -890,7 +890,7 @@ {"shape":"AccessDeniedException"}, {"shape":"NamespaceNotFoundException"} ], - "documentation":"

Modifies the parameters of a service.

For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.

For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet.

You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.

  • Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

  • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

  • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

You must have a service-linked role when you update any of the following service properties. If you specified a custom role when you created the service, Amazon ECS automatically replaces the roleARN associated with the service with the ARN of your service-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide.

  • loadBalancers,

  • serviceRegistries

" + "documentation":"

Modifies the parameters of a service.

For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.

For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet.

You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

  • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

  • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.

  • Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.

  • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.

    • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

    • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

  • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

  • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

You must have a service-linked role when you update any of the following service properties:

  • loadBalancers,

  • serviceRegistries

For more information about the role see the CreateService request parameter role .

" }, "UpdateServicePrimaryTaskSet":{ "name":"UpdateServicePrimaryTaskSet", @@ -1080,11 +1080,11 @@ "members":{ "autoScalingGroupArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) that identifies the Auto Scaling group.

" + "documentation":"

The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name.

" }, "managedScaling":{ "shape":"ManagedScaling", - "documentation":"

The managed scaling settings for the Auto Scaling group capacity provider.

" + "documentation":"

The managed scaling settings for the Auto Scaling group capacity provider.

" }, "managedTerminationProtection":{ "shape":"ManagedTerminationProtection", @@ -1602,11 +1602,11 @@ }, "startTimeout":{ "shape":"BoxedInteger", - "documentation":"

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED state.

When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value.

For tasks using the Fargate launch type, the task or service requires the following platforms:

  • Linux platform version 1.3.0 or later.

  • Windows platform version 1.0.0 or later.

For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a STOPPED state.

When the ECS_CONTAINER_START_TIMEOUT container agent configuration variable is used, it's enforced independently from this start timeout value.

For tasks using the Fargate launch type, the task or service requires the following platforms:

  • Linux platform version 1.3.0 or later.

  • Windows platform version 1.0.0 or later.

For tasks using the EC2 launch type, your container instances require at least version 1.26.0 of the container agent to use a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

The valid values are 2-120 seconds.

" }, "stopTimeout":{ "shape":"BoxedInteger", - "documentation":"

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.

For tasks using the Fargate launch type, the task or service requires the following platforms:

  • Linux platform version 1.3.0 or later.

  • Windows platform version 1.0.0 or later.

The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.

For tasks that use the EC2 launch type, if the stopTimeout parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.

For tasks using the Fargate launch type, the task or service requires the following platforms:

  • Linux platform version 1.3.0 or later.

  • Windows platform version 1.0.0 or later.

The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.

For tasks that use the EC2 launch type, if the stopTimeout parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

The valid values are 2-120 seconds.

" }, "hostname":{ "shape":"String", @@ -1686,7 +1686,7 @@ }, "credentialSpecs":{ "shape":"StringList", - "documentation":"

A list of ARNs in SSM or Amazon S3 to a credential spec (CredSpec) file that configures a container for Active Directory authentication. This parameter is only used with domainless authentication.

The format for each ARN is credentialspecdomainless:MyARN. Replace MyARN with the ARN in SSM or Amazon S3.

The credspec must provide an ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even if the tasks need to join different domains. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers.

" + "documentation":"

A list of ARNs in SSM or Amazon S3 to a credential spec (CredSpec) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the dockerSecurityOptions. The maximum number of ARNs is 1.

There are two formats for each ARN.

credentialspecdomainless:MyARN

You use credentialspecdomainless:MyARN to provide a CredSpec with an additional section for a secret in Secrets Manager. You provide the login credentials to the domain in the secret.

Each task that runs on any container instance can join different domains.

You can use this format without joining the container instance to a domain.

credentialspec:MyARN

You use credentialspec:MyARN to provide a CredSpec for a single domain.

You must join the container instance to the domain before you start any tasks that use this task definition.

In both formats, replace MyARN with the ARN in SSM or Amazon S3.

If you provide a credentialspecdomainless:MyARN, the credspec must provide an ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even if the tasks need to join different domains. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers.

" } }, "documentation":"

Container definitions are used in task definitions to describe the different containers that are launched as part of a task.

" @@ -1870,7 +1870,7 @@ "documentation":"

The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU.

" } }, - "documentation":"

The overrides that are sent to a container. An empty container override can be passed in. An example of an empty container override is {\"containerOverrides\": [ ] }. If a non-empty container override is specified, the name parameter must be included.

" + "documentation":"

The overrides that are sent to a container. An empty container override can be passed in. An example of an empty container override is {\"containerOverrides\": [ ] }. If a non-empty container override is specified, the name parameter must be included.

You can use Secrets Manager or Amazon Web Services Systems Manager Parameter Store to store the sensitive data. For more information, see Retrieve secrets through environment variables in the Amazon ECS Developer Guide.

" }, "ContainerOverrides":{ "type":"list", @@ -2119,7 +2119,7 @@ }, "taskDefinition":{ "shape":"String", - "documentation":"

The task definition for the tasks in the task set to use.

" + "documentation":"

The task definition for the tasks in the task set to use. If a revision isn't specified, the latest ACTIVE revision is used.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -2454,7 +2454,7 @@ "documentation":"

Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is on, when a service deployment fails, the service is rolled back to the last deployment that completed successfully.

" } }, - "documentation":"

The deployment circuit breaker can only be used for services using the rolling update (ECS) deployment type.

The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. If it is turned on, a service deployment will transition to a failed state and stop launching new tasks. You can also configure Amazon ECS to roll back your service to the last completed deployment after a failure. For more information, see Rolling update in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The deployment circuit breaker can only be used for services using the rolling update (ECS) deployment type.

The deployment circuit breaker determines whether a service deployment will fail if the service can't reach a steady state. If it is turned on, a service deployment will transition to a failed state and stop launching new tasks. You can also configure Amazon ECS to roll back your service to the last completed deployment after a failure. For more information, see Rolling update in the Amazon Elastic Container Service Developer Guide.

For more information about API failure reasons, see API failure reasons in the Amazon Elastic Container Service Developer Guide.

" }, "DeploymentConfiguration":{ "type":"structure", @@ -2953,7 +2953,7 @@ "documentation":"

The file type to use. The only supported value is s3.

" } }, - "documentation":"

A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information about the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying environment variables in the Amazon Elastic Container Service Developer Guide.

This parameter is only supported for tasks hosted on Fargate using the following platform versions:

  • Linux platform version 1.4.0 or later.

  • Windows platform version 1.0.0 or later.

" + "documentation":"

A list of files containing the environment variables to pass to a container. You can specify up to ten environment files. The file must have a .env file extension. Each line in an environment file should contain an environment variable in VARIABLE=VALUE format. Lines beginning with # are treated as comments and are ignored. For more information about the environment variable file syntax, see Declare default environment variables in file.

If there are environment variables specified using the environment parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see Specifying environment variables in the Amazon Elastic Container Service Developer Guide.

You must use the following platforms for the Fargate launch type:

  • Linux platform version 1.4.0 or later.

  • Windows platform version 1.0.0 or later.

" }, "EnvironmentFileType":{ "type":"string", @@ -3817,11 +3817,11 @@ "members":{ "targetGroupArn":{ "shape":"String", - "documentation":"

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.

A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a Classic Load Balancer, omit the target group ARN.

For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

For services using the CODE_DEPLOY deployment controller, you're required to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.

If your service's task definition uses the awsvpc network mode, you must choose ip as the target type, not instance. Do this when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.

" + "documentation":"

The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.

A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer.

For services using the ECS deployment controller, you can specify one or multiple target groups. For more information, see Registering multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

For services using the CODE_DEPLOY deployment controller, you're required to define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the Amazon Elastic Container Service Developer Guide.

If your service's task definition uses the awsvpc network mode, you must choose ip as the target type, not instance. Do this when creating your target groups because tasks that use the awsvpc network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.

" }, "loadBalancerName":{ "shape":"String", - "documentation":"

The name of the load balancer to associate with the Amazon ECS service or task set.

A load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.

" + "documentation":"

The name of the load balancer to associate with the Amazon ECS service or task set.

If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.

" }, "containerName":{ "shape":"String", @@ -3855,7 +3855,7 @@ "documentation":"

The secrets to pass to the log configuration. For more information, see Specifying sensitive data in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

The log configuration for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run .

By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Understand the following when specifying a log configuration for your containers.

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the valid values below). Additional log drivers may be available in future releases of the Amazon ECS container agent.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.

" + "documentation":"

The log configuration for the container. This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run .

By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Understand the following when specifying a log configuration for your containers.

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.

    For tasks on Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

    For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.

" }, "LogConfigurationOptionsMap":{ "type":"map", @@ -3954,7 +3954,7 @@ }, "maximumScalingStepSize":{ "shape":"ManagedScalingStepSize", - "documentation":"

The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 1 is used.

" + "documentation":"

The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of 10000 is used.

" }, "instanceWarmupPeriod":{ "shape":"ManagedScalingInstanceWarmupPeriod", @@ -4230,7 +4230,7 @@ }, "hostPort":{ "shape":"BoxedInteger", - "documentation":"

The port number on the container instance to reserve for your container.

If you specify a containerPortRange, leave this field empty and the value of the hostPort is set as follows:

  • For containers in a task with the awsvpc network mode, the hostPort is set to the same value as the containerPort. This is a static mapping strategy.

  • For containers in a task with the bridge network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy.

If you use containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort.

If you use containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. Automatically assigned ports aren't included in the 100 reserved ports quota.

" + "documentation":"

The port number on the container instance to reserve for your container.

If you specify a containerPortRange, leave this field empty and the value of the hostPort is set as follows:

  • For containers in a task with the awsvpc network mode, the hostPort is set to the same value as the containerPort. This is a static mapping strategy.

  • For containers in a task with the bridge network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy.

If you use containers in a task with the awsvpc or host network mode, the hostPort can either be left blank or set to the same value as the containerPort.

If you use containers in a task with the bridge network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort (or set it to 0) while specifying a containerPort and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.

The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 (Linux) or 49152 through 65535 (Windows) is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.

The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the remainingResources of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. Automatically assigned ports aren't included in the 100 reserved ports quota.

" }, "protocol":{ "shape":"TransportProtocol", @@ -5623,7 +5623,7 @@ }, "stopCode":{ "shape":"TaskStopCode", - "documentation":"

The stop code indicating why a task was stopped. The stoppedReason might contain additional details.

The following are valid values:

  • TaskFailedToStart

  • EssentialContainerExited

  • UserInitiated

  • TerminationNotice

  • ServiceSchedulerInitiated

  • SpotInterruption

" + "documentation":"

The stop code indicating why a task was stopped. The stoppedReason might contain additional details.

For more information about stop code, see Stopped tasks error codes in the Amazon ECS User Guide.

The following are valid values:

  • TaskFailedToStart

  • EssentialContainerExited

  • UserInitiated

  • TerminationNotice

  • ServiceSchedulerInitiated

  • SpotInterruption

" }, "stoppedAt":{ "shape":"Timestamp", @@ -5635,7 +5635,7 @@ }, "stoppingAt":{ "shape":"Timestamp", - "documentation":"

The Unix timestamp for the time when the task stops. More specifically, it's for the time when the task transitions from the RUNNING state to STOPPED.

" + "documentation":"

The Unix timestamp for the time when the task stops. More specifically, it's for the time when the task transitions from the RUNNING state to STOPPING.

" }, "tags":{ "shape":"Tags", @@ -5717,7 +5717,7 @@ }, "requiresCompatibilities":{ "shape":"CompatibilityList", - "documentation":"

The task launch types the task definition was validated against. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The task launch types the task definition was validated against. The valid values are EC2, FARGATE, and EXTERNAL. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

" }, "cpu":{ "shape":"String", @@ -6463,7 +6463,7 @@ "members":{ "name":{ "shape":"String", - "documentation":"

The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.

" + "documentation":"

The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.

This is required when you use an Amazon EFS volume.

" }, "host":{ "shape":"HostVolumeProperties", diff --git a/services/efs/pom.xml b/services/efs/pom.xml index de91d9cbcb0..f55f3d96d77 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/eks/pom.xml b/services/eks/pom.xml index 1d7e19bafd7..ad9730b620f 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/eks/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/eks/src/main/resources/codegen-resources/endpoint-rule-set.json index 4b665084edc..4d1971250c5 100644 --- a/services/eks/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/eks/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - 
"type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,224 +111,288 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "Region" } - ] - }, + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://eks-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": 
"PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", "argv": [ - "aws", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsDualStack" ] } ] } ], - "endpoint": { - "url": "https://fips.eks.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://eks-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://eks.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://fips.eks.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + 
"fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://eks.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://eks-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://eks-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://eks.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": 
"https://eks.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://eks.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://eks.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/eks/src/main/resources/codegen-resources/endpoint-tests.json b/services/eks/src/main/resources/codegen-resources/endpoint-tests.json index 8cc05949df5..0355c7dcc07 100644 --- a/services/eks/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/eks/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,980 +1,31 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - 
"url": "https://eks.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - 
"UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and 
DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://fips.eks.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - 
"Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS 
enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.af-south-1.amazonaws.com" - } - }, - "params": { - 
"UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS 
disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": 
false, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-northeast-3.amazonaws.com" + "url": "https://eks.af-south-1.amazonaws.com" } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - 
"url": "https://fips.eks.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-northeast-2.amazonaws.com" + "url": "https://eks.ap-east-1.amazonaws.com" } }, "params": { + "Region": "ap-east-1", "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", "UseDualStack": false } }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, { "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { @@ -983,680 +34,537 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://eks-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://eks.me-south-1.amazonaws.com" - } - }, - "params": { "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://eks.sa-east-1.amazonaws.com" + "url": "https://eks.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://fips.eks.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-east-1.api.aws" + "url": "https://eks.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-east-1.amazonaws.com" + "url": "https://eks.ap-south-1.amazonaws.com" } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "Region": "ap-east-1", "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region 
ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.cn-north-1.amazonaws.com.cn" + "url": "https://eks.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "cn-north-1", + "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://eks.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.cn-north-1.amazonaws.com.cn" + "url": "https://eks.ap-southeast-3.amazonaws.com" } }, "params": { + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "cn-north-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://eks-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-gov-west-1.amazonaws.com" + "url": "https://eks.ca-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", + "Region": "ca-central-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS 
disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-gov-west-1.api.aws" + "url": "https://eks.eu-central-1.amazonaws.com" } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-gov-west-1.amazonaws.com" + "url": "https://eks.eu-north-1.amazonaws.com" } }, "params": { + "Region": "eu-north-1", "UseFIPS": false, - "Region": "us-gov-west-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.ap-southeast-1.api.aws" + "url": "https://eks.eu-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.ap-southeast-1.amazonaws.com" + "url": "https://eks.eu-west-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", + "Region": "eu-west-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-1.api.aws" + "url": 
"https://eks.eu-west-2.amazonaws.com" } }, "params": { + "Region": "eu-west-2", "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-1.amazonaws.com" + "url": "https://eks.eu-west-3.amazonaws.com" } }, "params": { + "Region": "eu-west-3", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.ap-southeast-2.api.aws" + "url": "https://eks.me-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.ap-southeast-2.amazonaws.com" + "url": "https://eks.sa-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", + "Region": "sa-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-2.api.aws" + "url": "https://eks.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with 
FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-2.amazonaws.com" + "url": "https://fips.eks.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-2", + "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://eks.us-east-2.amazonaws.com" + } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://fips.eks.us-east-2.amazonaws.com" } }, "params": { + "Region": "us-east-2", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://eks.us-west-1.amazonaws.com" + } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://eks.us-iso-east-1.c2s.ic.gov" + "url": "https://fips.eks.us-west-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-iso-east-1", + "Region": "us-west-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.ap-southeast-3.api.aws" + "url": "https://eks.us-west-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.ap-southeast-3.amazonaws.com" + "url": "https://fips.eks.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": true, - "Region": "ap-southeast-3", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-3.api.aws" + "url": "https://eks-fips.us-east-1.api.aws" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-3", + "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-3.amazonaws.com" + "url": "https://eks.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": false + "UseDualStack": true } }, { - 
"documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.ap-southeast-4.api.aws" + "url": "https://eks.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-4", - "UseDualStack": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.ap-southeast-4.amazonaws.com" + "url": "https://eks.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-4", + "Region": "cn-northwest-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-4.api.aws" + "url": "https://eks-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-4", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.ap-southeast-4.amazonaws.com" + "url": "https://eks-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseFIPS": false, - "Region": "ap-southeast-4", + "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", 
"expect": { "endpoint": { - "url": "https://eks-fips.us-east-1.api.aws" + "url": "https://eks.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseFIPS": true, - "Region": "us-east-1", + "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://fips.eks.us-east-1.amazonaws.com" + "url": "https://eks.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-east-1", + "Region": "us-gov-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-east-1.api.aws" + "url": "https://eks.us-gov-east-1.amazonaws.com" } }, "params": { - "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-east-1.amazonaws.com" + "url": "https://eks.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks-fips.us-east-2.api.aws" + "url": "https://eks.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true + 
"UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://fips.eks.us-east-2.amazonaws.com" + "url": "https://eks-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://eks.us-east-2.api.aws" + "url": "https://eks.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.us-east-2.amazonaws.com" + "url": "https://eks.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://eks-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://eks-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://eks-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://eks.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://eks.cn-northwest-1.amazonaws.com.cn" + "url": "https://eks.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": false } }, @@ -1666,8 +574,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -1679,8 +587,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -1690,26 +598,27 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://eks.us-isob-east-1.sc2s.sgov.gov" + "url": "https://example.com" } 
}, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-isob-east-1", - "UseDualStack": false + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -1717,7 +626,6 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1728,8 +636,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1740,11 +648,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/eks/src/main/resources/codegen-resources/service-2.json b/services/eks/src/main/resources/codegen-resources/service-2.json index 1c034287282..4452bcc29fe 100644 --- a/services/eks/src/main/resources/codegen-resources/service-2.json +++ b/services/eks/src/main/resources/codegen-resources/service-2.json @@ -2669,6 +2669,7 @@ "Ec2SubnetNotFound", "Ec2SubnetInvalidConfiguration", "IamInstanceProfileNotFound", + "Ec2SubnetMissingIpv6Assignment", "IamLimitExceeded", "IamNodeRoleNotFound", "NodeCreationFailure", @@ -2678,7 +2679,20 @@ "AccessDenied", "InternalFailure", "ClusterUnreachable", - "Ec2SubnetMissingIpv6Assignment" + "AmiIdNotFound", + "AutoScalingGroupOptInRequired", + "AutoScalingGroupRateLimitExceeded", + "Ec2LaunchTemplateDeletionFailure", + "Ec2LaunchTemplateInvalidConfiguration", + 
"Ec2LaunchTemplateMaxLimitExceeded", + "Ec2SubnetListTooLong", + "IamThrottling", + "NodeTerminationFailure", + "PodEvictionFailure", + "SourceEc2LaunchTemplateNotFound", + "LimitExceeded", + "Unknown", + "AutoScalingGroupInstanceRefreshActive" ] }, "NodegroupResources":{ diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index 615d184e3eb..dcb5c9d4b09 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticache/src/main/resources/codegen-resources/service-2.json b/services/elasticache/src/main/resources/codegen-resources/service-2.json index 7721d619e4d..902bdcd4f48 100644 --- a/services/elasticache/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticache/src/main/resources/codegen-resources/service-2.json @@ -1329,6 +1329,25 @@ {"shape":"InvalidParameterCombinationException"} ], "documentation":"

Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API.

Note the following

  • A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period.

  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.

  • If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.

  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:

    1. Replication group message: Test Failover API called for node group <node-group-id>

    2. Cache cluster message: Failover from primary node <primary-node-id> to replica node <node-id> completed

    3. Replication group message: Failover from primary node <primary-node-id> to replica node <node-id> completed

    4. Cache cluster message: Recovering cache nodes <node-id>

    5. Cache cluster message: Finished recovery for cache nodes <node-id>

    For more information see:

Also see, Testing Multi-AZ in the ElastiCache User Guide.

" + }, + "TestMigration":{ + "name":"TestMigration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestMigrationMessage"}, + "output":{ + "shape":"TestMigrationResponse", + "resultWrapper":"TestMigrationResult" + }, + "errors":[ + {"shape":"ReplicationGroupNotFoundFault"}, + {"shape":"InvalidReplicationGroupStateFault"}, + {"shape":"ReplicationGroupAlreadyUnderMigrationFault"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"

Async API to test connection between source and target replication group.

" } }, "shapes":{ @@ -4137,7 +4156,7 @@ }, "NodeGroupCount":{ "shape":"Integer", - "documentation":"

The number of node groups you wish to add

" + "documentation":"

Total number of node groups you want

" }, "RegionalConfigurations":{ "shape":"RegionalConfigurationList", @@ -6711,6 +6730,29 @@ "ReplicationGroup":{"shape":"ReplicationGroup"} } }, + "TestMigrationMessage":{ + "type":"structure", + "required":[ + "ReplicationGroupId", + "CustomerNodeEndpointList" + ], + "members":{ + "ReplicationGroupId":{ + "shape":"String", + "documentation":"

The ID of the replication group to which data is to be migrated.

" + }, + "CustomerNodeEndpointList":{ + "shape":"CustomerNodeEndpointList", + "documentation":"

List of endpoints from which data should be migrated. List should have only one element.

" + } + } + }, + "TestMigrationResponse":{ + "type":"structure", + "members":{ + "ReplicationGroup":{"shape":"ReplicationGroup"} + } + }, "TimeRangeFilter":{ "type":"structure", "members":{ diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index 4014a8391a8..aba0b7f911e 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index f11c052f1f1..a04cda02a18 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 66fee6bfd61..1a5e796e3d5 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index 37f64408806..eb38fefa86e 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 79fee5593dd..78128a590f9 100644 --- 
a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": 
"Region" + } ] } ], @@ -131,179 +111,240 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - 
"argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ - "aws-us-gov", + true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "name" + "supportsFIPS" ] } ] } ], - "endpoint": { - "url": "https://elasticloadbalancing.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://elasticloadbalancing.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://elasticloadbalancing-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - 
"supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -311,7 +352,7 @@ { "conditions": [], "endpoint": { - "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -320,28 +361,13 @@ ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://elasticloadbalancing.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-tests.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-tests.json index 30ae4512f69..a5e023b96c1 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,42 +1,55 @@ { "testCases": [ { - "documentation": "For region us-iso-east-1 with 
FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-iso-east-1.c2s.ic.gov" + "url": "https://elasticloadbalancing.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "af-south-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-iso-west-1.c2s.ic.gov" + "url": "https://elasticloadbalancing.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-east-1", "UseFIPS": false, - "Region": "us-iso-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://elasticloadbalancing.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-iso-east-1" + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://elasticloadbalancing.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -47,87 +60,87 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "ap-northeast-3" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region 
ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-east-1.amazonaws.com" + "url": "https://elasticloadbalancing.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-south-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-east-1.amazonaws.com" + "url": "https://elasticloadbalancing.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-east-1" + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.eu-west-1.amazonaws.com" + "url": "https://elasticloadbalancing.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.eu-west-2.amazonaws.com" + "url": "https://elasticloadbalancing.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-3", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://elasticloadbalancing.eu-west-3.amazonaws.com" + "url": "https://elasticloadbalancing.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "ca-central-1", "UseFIPS": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.me-south-1.amazonaws.com" + "url": "https://elasticloadbalancing.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "me-south-1" + "UseDualStack": false } }, { @@ -138,139 +151,139 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-north-1", "UseFIPS": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-east-2.amazonaws.com" + "url": "https://elasticloadbalancing.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-south-1", "UseFIPS": false, - "Region": "us-east-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-east-2.amazonaws.com" + "url": "https://elasticloadbalancing.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": true, - "Region": "us-east-2" + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For 
region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.sa-east-1.amazonaws.com" + "url": "https://elasticloadbalancing.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-east-1.amazonaws.com" + "url": "https://elasticloadbalancing.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "eu-west-3", "UseFIPS": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.eu-south-1.amazonaws.com" + "url": "https://elasticloadbalancing.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "me-south-1", "UseFIPS": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.eu-central-1.amazonaws.com" + "url": "https://elasticloadbalancing.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "sa-east-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://elasticloadbalancing.ap-southeast-1.amazonaws.com" + "url": "https://elasticloadbalancing.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-2.amazonaws.com" + "url": "https://elasticloadbalancing-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ap-southeast-2" + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-southeast-3.amazonaws.com" + "url": "https://elasticloadbalancing.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-2", "UseFIPS": false, - "Region": "ap-southeast-3" + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ca-central-1.amazonaws.com" + "url": "https://elasticloadbalancing-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "ca-central-1" + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { @@ -281,9 +294,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", "UseFIPS": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -294,9 +307,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-1", 
"UseFIPS": true, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -307,9 +320,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -320,139 +333,152 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": true, - "Region": "us-west-2" + "UseDualStack": false } }, { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.af-south-1.amazonaws.com" + "url": "https://elasticloadbalancing-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "UseFIPS": false, - "Region": "af-south-1" + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-south-1.amazonaws.com" + "url": "https://elasticloadbalancing.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-south-1" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-northeast-1.amazonaws.com" + "url": "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.ap-northeast-2.amazonaws.com" + "url": "https://elasticloadbalancing.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-east-1.api.aws" + "url": "https://elasticloadbalancing-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-east-1.api.aws" + "url": "https://elasticloadbalancing-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://elasticloadbalancing.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-gov-west-1.amazonaws.com" + "url": "https://elasticloadbalancing.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": 
false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-gov-west-1.amazonaws.com" + "url": "https://elasticloadbalancing.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-gov-east-1.amazonaws.com" + "url": "https://elasticloadbalancing.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-gov-east-1.amazonaws.com" + "url": "https://elasticloadbalancing.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-west-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -463,9 +489,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -476,113 +502,144 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with 
FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.us-isob-east-1.sc2s.sgov.gov" + "url": "https://elasticloadbalancing.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://elasticloadbalancing.us-iso-west-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.cn-northwest-1.amazonaws.com.cn" + "url": "https://elasticloadbalancing-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1" + "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack 
disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn" + "url": "https://elasticloadbalancing.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticloadbalancing-fips.cn-north-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticloadbalancing-fips.cn-north-1.amazonaws.com.cn" + "url": "https://elasticloadbalancing-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": 
"https://elasticloadbalancing.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://example.com" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false, + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -592,9 +649,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -604,11 +661,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json index d2f143bcdec..df77c9e4543 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json @@ -244,7 +244,7 @@ {"shape":"TargetGroupNotFoundException"}, {"shape":"InvalidTargetException"} ], - "documentation":"

Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer.

" + "documentation":"

Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer.

Note: If the specified target does not exist, the action returns successfully.

" }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", @@ -646,7 +646,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"InvalidSecurityGroupException"} ], - "documentation":"

Associates the specified security groups with the specified Application Load Balancer. The specified security groups override the previously associated security groups.

You can't specify a security group for a Network Load Balancer or Gateway Load Balancer.

" + "documentation":"

Associates the specified security groups with the specified Application Load Balancer or Network Load Balancer. The specified security groups override the previously associated security groups.

You can't perform this operation on a Network Load Balancer unless you specified a security group for the load balancer when you created it.

You can't associate a security group with a Gateway Load Balancer.

" }, "SetSubnets":{ "name":"SetSubnets", @@ -1119,7 +1119,7 @@ }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

[Application Load Balancers] The IDs of the security groups for the load balancer.

" + "documentation":"

[Application Load Balancers and Network Load Balancers] The IDs of the security groups for the load balancer.

" }, "Scheme":{ "shape":"LoadBalancerSchemeEnum", @@ -1725,6 +1725,14 @@ }, "exception":true }, + "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic":{"type":"string"}, + "EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum":{ + "type":"string", + "enum":[ + "on", + "off" + ] + }, "FixedResponseActionConfig":{ "type":"structure", "required":["StatusCode"], @@ -1943,7 +1951,7 @@ "documentation":"

The maximum value of the limit.

" } }, - "documentation":"

Information about an Elastic Load Balancing resource limit for your Amazon Web Services account.

" + "documentation":"

Information about an Elastic Load Balancing resource limit for your Amazon Web Services account.

For more information, see the following:

" }, "Limits":{ "type":"list", @@ -2066,6 +2074,10 @@ "CustomerOwnedIpv4Pool":{ "shape":"CustomerOwnedIpv4Pool", "documentation":"

[Application Load Balancers on Outposts] The ID of the customer-owned address pool.

" + }, + "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic":{ + "shape":"EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic", + "documentation":"

Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web Services PrivateLink.

" } }, "documentation":"

Information about a load balancer.

" @@ -2682,7 +2694,7 @@ "documentation":"

Information for a source IP condition. Specify only when Field is source-ip.

" } }, - "documentation":"

Information about a condition for a rule.

Each rule can optionally include up to one of each of the following conditions: http-request-method, host-header, path-pattern, and source-ip. Each rule can also optionally include one or more of each of the following conditions: http-header and query-string. Note that the value for a condition cannot be empty.

" + "documentation":"

Information about a condition for a rule.

Each rule can optionally include up to one of each of the following conditions: http-request-method, host-header, path-pattern, and source-ip. Each rule can also optionally include one or more of each of the following conditions: http-header and query-string. Note that the value for a condition cannot be empty.

For more information, see Quotas for your Application Load Balancers.

" }, "RuleConditionList":{ "type":"list", @@ -2803,6 +2815,10 @@ "SecurityGroups":{ "shape":"SecurityGroups", "documentation":"

The IDs of the security groups.

" + }, + "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic":{ + "shape":"EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum", + "documentation":"

Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web Services PrivateLink. The default is on.

" } } }, @@ -2812,6 +2828,10 @@ "SecurityGroupIds":{ "shape":"SecurityGroups", "documentation":"

The IDs of the security groups associated with the load balancer.

" + }, + "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic":{ + "shape":"EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum", + "documentation":"

Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web Services PrivateLink.

" } } }, @@ -2833,7 +2853,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

[Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. .

" + "documentation":"

[Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

" } } }, @@ -3006,7 +3026,7 @@ }, "Port":{ "shape":"Port", - "documentation":"

The port on which the target is listening. If the target group protocol is GENEVE, the supported port is 6081. If the target type is alb, the targeted Application Load Balancer must have at least one listener whose port matches the target group port. Not used if the target is a Lambda function.

" + "documentation":"

The port on which the target is listening. If the target group protocol is GENEVE, the supported port is 6081. If the target type is alb, the targeted Application Load Balancer must have at least one listener whose port matches the target group port. This parameter is not used if the target is a Lambda function.

" }, "AvailabilityZone":{ "shape":"ZoneName", @@ -3036,7 +3056,7 @@ }, "Port":{ "shape":"Port", - "documentation":"

The port on which the targets are listening. Not used if the target is a Lambda function.

" + "documentation":"

The port on which the targets are listening. This parameter is not used if the target is a Lambda function.

" }, "VpcId":{ "shape":"VpcId", @@ -3080,7 +3100,7 @@ }, "LoadBalancerArns":{ "shape":"LoadBalancerArns", - "documentation":"

The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.

" + "documentation":"

The Amazon Resource Name (ARN) of the load balancer that routes traffic to this target group. You can use each target group with only one load balancer.

" }, "TargetType":{ "shape":"TargetTypeEnum", diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index f2d9607d3e4..42e690de0b2 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index 3312c0cf6a8..adc295ead0a 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index 0dae6b7fa3a..e6179ac876c 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index f8c74dbd800..6b3b3e6bda0 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/emrserverless/pom.xml b/services/emrserverless/pom.xml index 8052b5f5c3b..7e2c2ba2bf3 100644 --- a/services/emrserverless/pom.xml +++ b/services/emrserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT emrserverless AWS Java SDK :: Services :: EMR Serverless diff --git a/services/emrserverless/src/main/resources/codegen-resources/service-2.json b/services/emrserverless/src/main/resources/codegen-resources/service-2.json index 1b77304cc36..a4437ef55a3 100644 --- 
a/services/emrserverless/src/main/resources/codegen-resources/service-2.json +++ b/services/emrserverless/src/main/resources/codegen-resources/service-2.json @@ -94,7 +94,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a URL to access the job run dashboard. The generated URL is valid for one hour, after which you must invoke the API again to generate a new URL.

" + "documentation":"

Creates and returns a URL that you can use to access the application UIs for a job run.

For jobs in a running state, the application UI is a live user interface such as the Spark or Tez web UI. For completed jobs, the application UI is a persistent application user interface such as the Spark History Server or persistent Tez UI.

The URL is valid for one hour after you generate it. To access the application UI after that hour elapses, you must invoke the API again to generate a new URL.

" }, "GetJobRun":{ "name":"GetJobRun", @@ -529,6 +529,33 @@ "min":1, "pattern":"[A-Za-z0-9._-]+" }, + "CloudWatchLoggingConfiguration":{ + "type":"structure", + "required":["enabled"], + "members":{ + "enabled":{ + "shape":"Boolean", + "documentation":"

Enables CloudWatch logging.

" + }, + "logGroupName":{ + "shape":"LogGroupName", + "documentation":"

The name of the log group in Amazon CloudWatch Logs where you want to publish your logs.

" + }, + "logStreamNamePrefix":{ + "shape":"LogStreamNamePrefix", + "documentation":"

Prefix for the CloudWatch log stream name.

" + }, + "encryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The Key Management Service (KMS) key ARN to encrypt the logs that you store in CloudWatch Logs.

" + }, + "logTypes":{ + "shape":"LogTypeMap", + "documentation":"

The types of logs that you want to publish to CloudWatch. If you don't specify any log types, driver STDOUT and STDERR logs will be published to CloudWatch Logs by default. For more information including the supported worker types for Hive and Spark, see Logging for EMR Serverless with CloudWatch.

  • Key Valid Values: SPARK_DRIVER, SPARK_EXECUTOR, HIVE_DRIVER, TEZ_TASK

  • Array Members Valid Values: STDOUT, STDERR, HIVE_LOG, TEZ_AM, SYSTEM_LOGS

" + } + }, + "documentation":"

The Amazon CloudWatch configuration for monitoring logs. You can configure your jobs to send log information to CloudWatch.

" + }, "Configuration":{ "type":"structure", "required":["classification"], @@ -1263,6 +1290,38 @@ } } }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "LogStreamNamePrefix":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[^:*]*" + }, + "LogTypeList":{ + "type":"list", + "member":{"shape":"LogTypeString"}, + "max":5, + "min":1 + }, + "LogTypeMap":{ + "type":"map", + "key":{"shape":"WorkerTypeString"}, + "value":{"shape":"LogTypeList"}, + "max":4, + "min":1 + }, + "LogTypeString":{ + "type":"string", + "documentation":"

Log type for a Spark/Hive job-run.

", + "max":50, + "min":1, + "pattern":"[a-zA-Z]+[-_]*[a-zA-Z]+" + }, "ManagedPersistenceMonitoringConfiguration":{ "type":"structure", "members":{ @@ -1315,6 +1374,10 @@ "managedPersistenceMonitoringConfiguration":{ "shape":"ManagedPersistenceMonitoringConfiguration", "documentation":"

The managed log persistence configuration for a job run.

" + }, + "cloudWatchLoggingConfiguration":{ + "shape":"CloudWatchLoggingConfiguration", + "documentation":"

The Amazon CloudWatch configuration for monitoring logs. You can configure your jobs to send log information to CloudWatch.

" } }, "documentation":"

The configuration setting for monitoring.

" @@ -1833,6 +1896,7 @@ }, "WorkerTypeString":{ "type":"string", + "documentation":"

Worker type for an analytics framework.

", "max":50, "min":1, "pattern":"[a-zA-Z]+[-_]*[a-zA-Z]+" diff --git a/services/entityresolution/pom.xml b/services/entityresolution/pom.xml new file mode 100644 index 00000000000..716d634e766 --- /dev/null +++ b/services/entityresolution/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.126-SNAPSHOT + + entityresolution + AWS Java SDK :: Services :: Entity Resolution + The AWS Java SDK for Entity Resolution module holds the client classes that are used for + communicating with Entity Resolution. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.entityresolution + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/entityresolution/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/entityresolution/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 00000000000..b4f271ba393 --- /dev/null +++ b/services/entityresolution/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } 
+ ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entityresolution-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entityresolution-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entityresolution.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + 
"rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://entityresolution.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/entityresolution/src/main/resources/codegen-resources/endpoint-tests.json b/services/entityresolution/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 00000000000..a836f3e8d6c --- /dev/null +++ b/services/entityresolution/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,295 @@ +{ + "testCases": [ + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + 
"endpoint": { + "url": "https://entityresolution-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS 
disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS 
disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://entityresolution.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": false, + "Endpoint": "https://example.com" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/entityresolution/src/main/resources/codegen-resources/paginators-1.json b/services/entityresolution/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 00000000000..5a129d437d5 --- /dev/null +++ b/services/entityresolution/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListMatchingJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "jobs" 
+ }, + "ListMatchingWorkflows": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "workflowSummaries" + }, + "ListSchemaMappings": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "schemaList" + } + } +} diff --git a/services/entityresolution/src/main/resources/codegen-resources/service-2.json b/services/entityresolution/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 00000000000..2603a3901ed --- /dev/null +++ b/services/entityresolution/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1543 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"entityresolution", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"AWSEntityResolution", + "serviceFullName":"AWS EntityResolution", + "serviceId":"EntityResolution", + "signatureVersion":"v4", + "signingName":"entityresolution", + "uid":"entityresolution-2018-05-10" + }, + "operations":{ + "CreateMatchingWorkflow":{ + "name":"CreateMatchingWorkflow", + "http":{ + "method":"POST", + "requestUri":"/matchingworkflows", + "responseCode":200 + }, + "input":{"shape":"CreateMatchingWorkflowInput"}, + "output":{"shape":"CreateMatchingWorkflowOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ExceedsLimitException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a MatchingWorkflow object which stores the configuration of the data processing job to be run. It is important to note that there should not be a pre-existing MatchingWorkflow with the same name. To modify an existing workflow, utilize the UpdateMatchingWorkflow API.

" + }, + "CreateSchemaMapping":{ + "name":"CreateSchemaMapping", + "http":{ + "method":"POST", + "requestUri":"/schemas", + "responseCode":200 + }, + "input":{"shape":"CreateSchemaMappingInput"}, + "output":{"shape":"CreateSchemaMappingOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ExceedsLimitException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a schema mapping, which defines the schema of the input customer records table. The SchemaMapping also provides Entity Resolution with some metadata about the table, such as the attribute types of the columns and which columns to match on.

" + }, + "DeleteMatchingWorkflow":{ + "name":"DeleteMatchingWorkflow", + "http":{ + "method":"DELETE", + "requestUri":"/matchingworkflows/{workflowName}", + "responseCode":200 + }, + "input":{"shape":"DeleteMatchingWorkflowInput"}, + "output":{"shape":"DeleteMatchingWorkflowOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the MatchingWorkflow with a given name. This operation will succeed even if a workflow with the given name does not exist.

", + "idempotent":true + }, + "DeleteSchemaMapping":{ + "name":"DeleteSchemaMapping", + "http":{ + "method":"DELETE", + "requestUri":"/schemas/{schemaName}", + "responseCode":200 + }, + "input":{"shape":"DeleteSchemaMappingInput"}, + "output":{"shape":"DeleteSchemaMappingOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the SchemaMapping with a given name. This operation will succeed even if a schema with the given name does not exist. This operation will fail if there is a DataIntegrationWorkflow object that references the SchemaMapping in the workflow's InputSourceConfig.

", + "idempotent":true + }, + "GetMatchId":{ + "name":"GetMatchId", + "http":{ + "method":"POST", + "requestUri":"/matchingworkflows/{workflowName}/matches", + "responseCode":200 + }, + "input":{"shape":"GetMatchIdInput"}, + "output":{"shape":"GetMatchIdOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the corresponding Match ID of a customer record if the record has been processed.

" + }, + "GetMatchingJob":{ + "name":"GetMatchingJob", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows/{workflowName}/jobs/{jobId}", + "responseCode":200 + }, + "input":{"shape":"GetMatchingJobInput"}, + "output":{"shape":"GetMatchingJobOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Gets the status, metrics, and errors (if there are any) that are associated with a job.

" + }, + "GetMatchingWorkflow":{ + "name":"GetMatchingWorkflow", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows/{workflowName}", + "responseCode":200 + }, + "input":{"shape":"GetMatchingWorkflowInput"}, + "output":{"shape":"GetMatchingWorkflowOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the MatchingWorkflow with a given name, if it exists.

" + }, + "GetSchemaMapping":{ + "name":"GetSchemaMapping", + "http":{ + "method":"GET", + "requestUri":"/schemas/{schemaName}", + "responseCode":200 + }, + "input":{"shape":"GetSchemaMappingInput"}, + "output":{"shape":"GetSchemaMappingOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns the SchemaMapping of a given name.

" + }, + "ListMatchingJobs":{ + "name":"ListMatchingJobs", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows/{workflowName}/jobs", + "responseCode":200 + }, + "input":{"shape":"ListMatchingJobsInput"}, + "output":{"shape":"ListMatchingJobsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all jobs for a given workflow.

" + }, + "ListMatchingWorkflows":{ + "name":"ListMatchingWorkflows", + "http":{ + "method":"GET", + "requestUri":"/matchingworkflows", + "responseCode":200 + }, + "input":{"shape":"ListMatchingWorkflowsInput"}, + "output":{"shape":"ListMatchingWorkflowsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of all the MatchingWorkflows that have been created for an AWS account.

" + }, + "ListSchemaMappings":{ + "name":"ListSchemaMappings", + "http":{ + "method":"GET", + "requestUri":"/schemas", + "responseCode":200 + }, + "input":{"shape":"ListSchemaMappingsInput"}, + "output":{"shape":"ListSchemaMappingsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Returns a list of all the SchemaMappings that have been created for an AWS account.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Displays the tags associated with an AWS Entity Resolution resource. In Entity Resolution, SchemaMapping, and MatchingWorkflow can be tagged.

" + }, + "StartMatchingJob":{ + "name":"StartMatchingJob", + "http":{ + "method":"POST", + "requestUri":"/matchingworkflows/{workflowName}/jobs", + "responseCode":200 + }, + "input":{"shape":"StartMatchingJobInput"}, + "output":{"shape":"StartMatchingJobOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ExceedsLimitException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Starts the MatchingJob of a workflow. The workflow must have previously been created using the CreateMatchingWorkflow endpoint.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Assigns one or more tags (key-value pairs) to the specified AWS Entity Resolution resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. In Entity Resolution, SchemaMapping, and MatchingWorkflow can be tagged. Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters. You can use the TagResource action with a resource that already has tags. If you specify a new tag key, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes one or more tags from the specified AWS Entity Resolution resource. In Entity Resolution, SchemaMapping, and MatchingWorkflow can be tagged.

", + "idempotent":true + }, + "UpdateMatchingWorkflow":{ + "name":"UpdateMatchingWorkflow", + "http":{ + "method":"PUT", + "requestUri":"/matchingworkflows/{workflowName}", + "responseCode":200 + }, + "input":{"shape":"UpdateMatchingWorkflowInput"}, + "output":{"shape":"UpdateMatchingWorkflowOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates an existing MatchingWorkflow. This method is identical to CreateMatchingWorkflow, except it uses an HTTP PUT request instead of a POST request, and the MatchingWorkflow must already exist for the method to succeed.

", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

You do not have sufficient access to perform this action. HTTP Status Code: 403

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AttributeMatchingModel":{ + "type":"string", + "enum":[ + "ONE_TO_ONE", + "MANY_TO_MANY" + ] + }, + "AttributeName":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9- \\t]*$" + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request could not be processed because of conflict in the current state of the resource. Example: Workflow already exists, Schema already exists, Workflow is currently running, etc. HTTP Status Code: 400

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "CreateMatchingWorkflowInput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowName" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

A description of the workflow.

" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"

An object which defines an incremental run type and has only incrementalRunType as a field.

" + }, + "inputSourceConfig":{ + "shape":"InputSourceConfig", + "documentation":"

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

" + }, + "outputSourceConfig":{ + "shape":"OutputSourceConfig", + "documentation":"

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

" + }, + "resolutionTechniques":{ + "shape":"ResolutionTechniques", + "documentation":"

An object which defines the resolutionType and the ruleBasedProperties.

" + }, + "roleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow. There cannot be multiple DataIntegrationWorkflows with the same name.

" + } + } + }, + "CreateMatchingWorkflowOutput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowArn", + "workflowName" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

A description of the workflow.

" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"

An object which defines an incremental run type and has only incrementalRunType as a field.

" + }, + "inputSourceConfig":{ + "shape":"InputSourceConfig", + "documentation":"

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

" + }, + "outputSourceConfig":{ + "shape":"OutputSourceConfig", + "documentation":"

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

" + }, + "resolutionTechniques":{ + "shape":"ResolutionTechniques", + "documentation":"

An object which defines the resolutionType and the ruleBasedProperties.

" + }, + "roleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

" + }, + "workflowArn":{ + "shape":"MatchingWorkflowArn", + "documentation":"

The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.

" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow.

" + } + } + }, + "CreateSchemaMappingInput":{ + "type":"structure", + "required":["schemaName"], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

A description of the schema.

" + }, + "mappedInputFields":{ + "shape":"SchemaInputAttributes", + "documentation":"

A list of MappedInputFields. Each MappedInputField corresponds to a column in the source data table, and contains the column name plus additional information that Entity Resolution uses for matching.

" + }, + "schemaName":{ + "shape":"EntityName", + "documentation":"

The name of the schema. There cannot be multiple SchemaMappings with the same name.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "CreateSchemaMappingOutput":{ + "type":"structure", + "required":[ + "description", + "mappedInputFields", + "schemaArn", + "schemaName" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

A description of the schema.

" + }, + "mappedInputFields":{ + "shape":"SchemaInputAttributes", + "documentation":"

A list of MappedInputFields. Each MappedInputField corresponds to a column in the source data table, and contains the column name plus additional information that Entity Resolution uses for matching.

" + }, + "schemaArn":{ + "shape":"SchemaMappingArn", + "documentation":"

The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.

" + }, + "schemaName":{ + "shape":"EntityName", + "documentation":"

The name of the schema.

" + } + } + }, + "DeleteMatchingWorkflowInput":{ + "type":"structure", + "required":["workflowName"], + "members":{ + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow to be deleted.

", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "DeleteMatchingWorkflowOutput":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

A successful operation message.

" + } + } + }, + "DeleteSchemaMappingInput":{ + "type":"structure", + "required":["schemaName"], + "members":{ + "schemaName":{ + "shape":"EntityName", + "documentation":"

The name of the schema to delete.

", + "location":"uri", + "locationName":"schemaName" + } + } + }, + "DeleteSchemaMappingOutput":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

A successful operation message.

" + } + } + }, + "Description":{ + "type":"string", + "max":255, + "min":0 + }, + "EntityName":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9-]*$" + }, + "ErrorDetails":{ + "type":"structure", + "members":{ + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

The error message from the job, if there is one.

" + } + }, + "documentation":"

An object containing an error message, if there was an error.

" + }, + "ErrorMessage":{ + "type":"string", + "max":2048, + "min":1 + }, + "ExceedsLimitException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request was rejected because it attempted to create resources beyond the current AWS Entity Resolution account limits. The error message describes the limit exceeded. HTTP Status Code: 402

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "GetMatchIdInput":{ + "type":"structure", + "required":[ + "record", + "workflowName" + ], + "members":{ + "record":{ + "shape":"RecordAttributeMap", + "documentation":"

The record to fetch the Match ID for.

" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow.

", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "GetMatchIdOutput":{ + "type":"structure", + "members":{ + "matchId":{ + "shape":"String", + "documentation":"

The unique identifiers for this group of match records.

" + } + } + }, + "GetMatchingJobInput":{ + "type":"structure", + "required":[ + "jobId", + "workflowName" + ], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the job.

", + "location":"uri", + "locationName":"jobId" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow.

", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "GetMatchingJobOutput":{ + "type":"structure", + "required":[ + "jobId", + "startTime", + "status" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the job has finished.

" + }, + "errorDetails":{ + "shape":"ErrorDetails", + "documentation":"

An object containing an error message, if there was an error.

" + }, + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the job.

" + }, + "metrics":{ + "shape":"JobMetrics", + "documentation":"

Metrics associated with the execution, specifically total records processed, unique IDs generated, and records the execution skipped.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the job was started.

" + }, + "status":{ + "shape":"JobStatus", + "documentation":"

The current status of the job. Either running, succeeded, queued, or failed.

" + } + } + }, + "GetMatchingWorkflowInput":{ + "type":"structure", + "required":["workflowName"], + "members":{ + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow.

", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "GetMatchingWorkflowOutput":{ + "type":"structure", + "required":[ + "createdAt", + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "updatedAt", + "workflowArn", + "workflowName" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the workflow was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A description of the workflow.

" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"

An object which defines an incremental run type and has only incrementalRunType as a field.

" + }, + "inputSourceConfig":{ + "shape":"InputSourceConfig", + "documentation":"

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

" + }, + "outputSourceConfig":{ + "shape":"OutputSourceConfig", + "documentation":"

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

" + }, + "resolutionTechniques":{ + "shape":"ResolutionTechniques", + "documentation":"

An object which defines the resolutionType and the ruleBasedProperties

" + }, + "roleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to access resources on your behalf.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the workflow was last updated.

" + }, + "workflowArn":{ + "shape":"MatchingWorkflowArn", + "documentation":"

The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.

" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow.

" + } + } + }, + "GetSchemaMappingInput":{ + "type":"structure", + "required":["schemaName"], + "members":{ + "schemaName":{ + "shape":"EntityName", + "documentation":"

The name of the schema to be retrieved.

", + "location":"uri", + "locationName":"schemaName" + } + } + }, + "GetSchemaMappingOutput":{ + "type":"structure", + "required":[ + "createdAt", + "mappedInputFields", + "schemaArn", + "schemaName", + "updatedAt" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the SchemaMapping was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A description of the schema.

" + }, + "mappedInputFields":{ + "shape":"SchemaInputAttributes", + "documentation":"

A list of MappedInputFields. Each MappedInputField corresponds to a column in the source data table, and contains a column name plus additional information that Entity Resolution uses for matching.

" + }, + "schemaArn":{ + "shape":"SchemaMappingArn", + "documentation":"

The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.

" + }, + "schemaName":{ + "shape":"EntityName", + "documentation":"

The name of the schema.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the SchemaMapping was last updated.

" + } + } + }, + "IncrementalRunConfig":{ + "type":"structure", + "members":{ + "incrementalRunType":{ + "shape":"IncrementalRunType", + "documentation":"

The type of incremental run. It takes only one value: IMMEDIATE.

" + } + }, + "documentation":"

An object which defines an incremental run type and has only incrementalRunType as a field.

" + }, + "IncrementalRunType":{ + "type":"string", + "enum":["IMMEDIATE"] + }, + "InputSource":{ + "type":"structure", + "required":[ + "inputSourceARN", + "schemaName" + ], + "members":{ + "applyNormalization":{ + "shape":"Boolean", + "documentation":"

Normalizes the attributes defined in the schema in the input data. For example, if an attribute has an AttributeType of PHONE_NUMBER, and the data in the input table is in a format of 1234567890, Entity Resolution will normalize this field in the output to (123)-456-7890.

" + }, + "inputSourceARN":{ + "shape":"InputSourceInputSourceARNString", + "documentation":"

A Glue table ARN for the input source table.

" + }, + "schemaName":{ + "shape":"EntityName", + "documentation":"

The name of the schema to be retrieved.

" + } + }, + "documentation":"

An object containing InputSourceARN, SchemaName, and ApplyNormalization.

" + }, + "InputSourceConfig":{ + "type":"list", + "member":{"shape":"InputSource"}, + "max":20, + "min":1 + }, + "InputSourceInputSourceARNString":{ + "type":"string", + "pattern":"^arn:aws:.*:.*:[0-9]+:.*$" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

This exception occurs when there is an internal failure in the AWS Entity Resolution service. HTTP Status Code: 500

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "JobId":{ + "type":"string", + "pattern":"^[a-f0-9]{32}$" + }, + "JobList":{ + "type":"list", + "member":{"shape":"JobSummary"} + }, + "JobMetrics":{ + "type":"structure", + "members":{ + "inputRecords":{ + "shape":"Integer", + "documentation":"

The total number of input records.

" + }, + "matchIDs":{ + "shape":"Integer", + "documentation":"

The total number of matchIDs generated.

" + }, + "recordsNotProcessed":{ + "shape":"Integer", + "documentation":"

The total number of records that did not get processed.

" + }, + "totalRecordsProcessed":{ + "shape":"Integer", + "documentation":"

The total number of records processed.

" + } + }, + "documentation":"

An object containing InputRecords, TotalRecordsProcessed, MatchIDs, and RecordsNotProcessed.

" + }, + "JobStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "SUCCEEDED", + "FAILED", + "QUEUED" + ] + }, + "JobSummary":{ + "type":"structure", + "required":[ + "jobId", + "startTime", + "status" + ], + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the job has finished.

" + }, + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the job.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the job was started.

" + }, + "status":{ + "shape":"JobStatus", + "documentation":"

The current status of the job. Either running, succeeded, queued, or failed.

" + } + }, + "documentation":"

An object containing the JobId, Status, StartTime, and EndTime of a job.

" + }, + "KMSArn":{ + "type":"string", + "pattern":"^arn:aws:kms:.*:[0-9]+:.*$" + }, + "ListMatchingJobsInput":{ + "type":"structure", + "required":["workflowName"], + "members":{ + "maxResults":{ + "shape":"ListMatchingJobsInputMaxResultsInteger", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from the previous ListSchemaMappings API call.

", + "location":"querystring", + "locationName":"nextToken" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow to be retrieved.

", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "ListMatchingJobsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "ListMatchingJobsOutput":{ + "type":"structure", + "members":{ + "jobs":{ + "shape":"JobList", + "documentation":"

A list of JobSummary objects, each of which contain the ID, status, start time, and end time of a job.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from the previous ListSchemaMappings API call.

" + } + } + }, + "ListMatchingWorkflowsInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListMatchingWorkflowsInputMaxResultsInteger", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from the previous ListSchemaMappings API call.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListMatchingWorkflowsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25 + }, + "ListMatchingWorkflowsOutput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from the previous ListSchemaMappings API call.

" + }, + "workflowSummaries":{ + "shape":"MatchingWorkflowList", + "documentation":"

A list of MatchingWorkflowSummary objects, each of which contain the fields WorkflowName, WorkflowArn, CreatedAt, and UpdatedAt.

" + } + } + }, + "ListSchemaMappingsInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListSchemaMappingsInputMaxResultsInteger", + "documentation":"

The maximum number of objects returned per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from the previous ListSchemaMappings API call.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListSchemaMappingsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25 + }, + "ListSchemaMappingsOutput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from the previous ListDomains API call.

" + }, + "schemaList":{ + "shape":"SchemaMappingList", + "documentation":"

A list of SchemaMappingSummary objects, each of which contain the fields SchemaName, SchemaArn, CreatedAt, UpdatedAt.

" + } + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"VeniceGlobalArn", + "documentation":"

The ARN of the resource for which you want to view tags.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "MatchingWorkflowArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-us-gov|aws-cn):entityresolution:.*:[0-9]+:(matchingworkflow/.*)$" + }, + "MatchingWorkflowList":{ + "type":"list", + "member":{"shape":"MatchingWorkflowSummary"} + }, + "MatchingWorkflowSummary":{ + "type":"structure", + "required":[ + "createdAt", + "updatedAt", + "workflowArn", + "workflowName" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the workflow was created.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the workflow was last updated.

" + }, + "workflowArn":{ + "shape":"MatchingWorkflowArn", + "documentation":"

The ARN (Amazon Resource Name) that Entity Resolution generated for the MatchingWorkflow.

" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow.

" + } + }, + "documentation":"

A list of MatchingWorkflowSummary objects, each of which contain the fields WorkflowName, WorkflowArn, CreatedAt, UpdatedAt.

" + }, + "NextToken":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[a-zA-Z_0-9-=+/]*$" + }, + "OutputAttribute":{ + "type":"structure", + "required":["name"], + "members":{ + "hashed":{ + "shape":"Boolean", + "documentation":"

Enables the ability to hash the column values in the output.

" + }, + "name":{ + "shape":"AttributeName", + "documentation":"

A name of a column to be written to the output. This must be an InputField name in the schema mapping.

" + } + }, + "documentation":"

A list of OutputAttribute objects, each of which have the fields Name and Hashed. Each of these objects selects a column to be included in the output table, and whether the values of the column should be hashed.

" + }, + "OutputSource":{ + "type":"structure", + "required":[ + "output", + "outputS3Path" + ], + "members":{ + "KMSArn":{ + "shape":"KMSArn", + "documentation":"

Customer KMS ARN for encryption at rest. If not provided, system will use an Entity Resolution managed KMS key.

" + }, + "applyNormalization":{ + "shape":"Boolean", + "documentation":"

Normalizes the attributes defined in the schema in the input data. For example, if an attribute has an AttributeType of PHONE_NUMBER, and the data in the input table is in a format of 1234567890, Entity Resolution will normalize this field in the output to (123)-456-7890.

" + }, + "output":{ + "shape":"OutputSourceOutputList", + "documentation":"

A list of OutputAttribute objects, each of which have the fields Name and Hashed. Each of these objects selects a column to be included in the output table, and whether the values of the column should be hashed.

" + }, + "outputS3Path":{ + "shape":"OutputSourceOutputS3PathString", + "documentation":"

The S3 path to which Entity Resolution will write the output table.

" + } + }, + "documentation":"

A list of OutputAttribute objects, each of which have the fields Name and Hashed. Each of these objects selects a column to be included in the output table, and whether the values of the column should be hashed.

" + }, + "OutputSourceConfig":{ + "type":"list", + "member":{"shape":"OutputSource"}, + "max":1, + "min":1 + }, + "OutputSourceOutputList":{ + "type":"list", + "member":{"shape":"OutputAttribute"}, + "max":750, + "min":0 + }, + "OutputSourceOutputS3PathString":{ + "type":"string", + "pattern":"^s3://([^/]+)/?(.*?([^/]+)/?)$" + }, + "RecordAttributeMap":{ + "type":"map", + "key":{"shape":"RecordAttributeMapKeyString"}, + "value":{"shape":"RecordAttributeMapValueString"}, + "sensitive":true + }, + "RecordAttributeMapKeyString":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9- \\t]*$" + }, + "RecordAttributeMapValueString":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9-.@ ()+\\t]*$" + }, + "ResolutionTechniques":{ + "type":"structure", + "members":{ + "resolutionType":{ + "shape":"ResolutionType", + "documentation":"

There are two types of matching, RULE_MATCHING and ML_MATCHING

" + }, + "ruleBasedProperties":{ + "shape":"RuleBasedProperties", + "documentation":"

An object which defines the list of matching rules to run and has a field Rules, which is a list of rule objects.

" + } + }, + "documentation":"

An object which defines the resolutionType and the ruleBasedProperties

" + }, + "ResolutionType":{ + "type":"string", + "enum":[ + "RULE_MATCHING", + "ML_MATCHING" + ] + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource could not be found. HTTP Status Code: 404

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "Rule":{ + "type":"structure", + "required":[ + "matchingKeys", + "ruleName" + ], + "members":{ + "matchingKeys":{ + "shape":"RuleMatchingKeysList", + "documentation":"

A list of MatchingKeys. The MatchingKeys must have been defined in the SchemaMapping. Two records are considered to match according to this rule if all of the MatchingKeys match.

" + }, + "ruleName":{ + "shape":"RuleRuleNameString", + "documentation":"

A name for the matching rule.

" + } + }, + "documentation":"

An object containing RuleName, and MatchingKeys.

" + }, + "RuleBasedProperties":{ + "type":"structure", + "required":[ + "attributeMatchingModel", + "rules" + ], + "members":{ + "attributeMatchingModel":{ + "shape":"AttributeMatchingModel", + "documentation":"

You can either choose ONE_TO_ONE or MANY_TO_MANY as the AttributeMatchingModel. When choosing MANY_TO_MANY, the system can match attribute across the sub-types of an attribute type. For example, if the value of the Email field of Profile A and the value of BusinessEmail field of Profile B matches, the two profiles are matched on the Email type. When choosing ONE_TO_ONE the system can only match if the sub-types are exact matches. For example, only when the value of the Email field of Profile A and the value of the Email field of Profile B matches, the two profiles are matched on the Email type.

" + }, + "rules":{ + "shape":"RuleBasedPropertiesRulesList", + "documentation":"

A list of Rule objects, each of which have fields RuleName and MatchingKeys.

" + } + }, + "documentation":"

An object which defines the list of matching rules to run and has a field Rules, which is a list of rule objects.

" + }, + "RuleBasedPropertiesRulesList":{ + "type":"list", + "member":{"shape":"Rule"}, + "max":15, + "min":1 + }, + "RuleMatchingKeysList":{ + "type":"list", + "member":{"shape":"AttributeName"}, + "max":15, + "min":1 + }, + "RuleRuleNameString":{ + "type":"string", + "max":255, + "min":0, + "pattern":"^[a-zA-Z_0-9- \\t]*$" + }, + "SchemaAttributeType":{ + "type":"string", + "enum":[ + "NAME", + "NAME_FIRST", + "NAME_MIDDLE", + "NAME_LAST", + "ADDRESS", + "ADDRESS_STREET1", + "ADDRESS_STREET2", + "ADDRESS_STREET3", + "ADDRESS_CITY", + "ADDRESS_STATE", + "ADDRESS_COUNTRY", + "ADDRESS_POSTALCODE", + "PHONE", + "PHONE_NUMBER", + "PHONE_COUNTRYCODE", + "EMAIL_ADDRESS", + "UNIQUE_ID", + "DATE", + "STRING" + ] + }, + "SchemaInputAttribute":{ + "type":"structure", + "required":[ + "fieldName", + "type" + ], + "members":{ + "fieldName":{ + "shape":"AttributeName", + "documentation":"

A string containing the field name.

" + }, + "groupName":{ + "shape":"AttributeName", + "documentation":"

Instruct Entity Resolution to combine several columns into a unified column with the identical attribute type. For example, when working with columns such as first_name, middle_name, and last_name, assigning them a common GroupName will prompt Entity Resolution to concatenate them into a single value.

" + }, + "matchKey":{ + "shape":"AttributeName", + "documentation":"

A key that allows grouping of multiple input attributes into a unified matching group. For example, let's consider a scenario where the source table contains various addresses, such as business_address and shipping_address. By assigning the MatchKey Address to both attributes, Entity Resolution will match records across these fields to create a consolidated matching group. If no MatchKey is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.

" + }, + "type":{ + "shape":"SchemaAttributeType", + "documentation":"

The type of the attribute, selected from a list of values.

" + } + }, + "documentation":"

An object containing FieldName, Type, GroupName, and MatchKey.

" + }, + "SchemaInputAttributes":{ + "type":"list", + "member":{"shape":"SchemaInputAttribute"}, + "max":25, + "min":2 + }, + "SchemaMappingArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-us-gov|aws-cn):entityresolution:.*:[0-9]+:(schemamapping/.*)$" + }, + "SchemaMappingList":{ + "type":"list", + "member":{"shape":"SchemaMappingSummary"} + }, + "SchemaMappingSummary":{ + "type":"structure", + "required":[ + "createdAt", + "schemaArn", + "schemaName", + "updatedAt" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the SchemaMapping was created.

" + }, + "schemaArn":{ + "shape":"SchemaMappingArn", + "documentation":"

The ARN (Amazon Resource Name) that Entity Resolution generated for the SchemaMapping.

" + }, + "schemaName":{ + "shape":"EntityName", + "documentation":"

The name of the schema.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the SchemaMapping was last updated.

" + } + }, + "documentation":"

An object containing SchemaName, SchemaArn, CreatedAt, and UpdatedAt.

" + }, + "StartMatchingJobInput":{ + "type":"structure", + "required":["workflowName"], + "members":{ + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the matching job to be retrieved.

", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "StartMatchingJobOutput":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

The ID of the job.

" + } + } + }, + "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"VeniceGlobalArn", + "documentation":"

The ARN of the resource for which you want to view tags.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request was denied due to request throttling. HTTP Status Code: 429

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"VeniceGlobalArn", + "documentation":"

The ARN of the resource for which you want to untag.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The list of tag keys to remove from the resource.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateMatchingWorkflowInput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowName" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

A description of the workflow.

" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"

An object which defines an incremental run type and has only incrementalRunType as a field.

" + }, + "inputSourceConfig":{ + "shape":"InputSourceConfig", + "documentation":"

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

" + }, + "outputSourceConfig":{ + "shape":"OutputSourceConfig", + "documentation":"

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

" + }, + "resolutionTechniques":{ + "shape":"ResolutionTechniques", + "documentation":"

An object which defines the resolutionType and the ruleBasedProperties

" + }, + "roleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow to be retrieved.

", + "location":"uri", + "locationName":"workflowName" + } + } + }, + "UpdateMatchingWorkflowOutput":{ + "type":"structure", + "required":[ + "inputSourceConfig", + "outputSourceConfig", + "resolutionTechniques", + "roleArn", + "workflowName" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

A description of the workflow.

" + }, + "incrementalRunConfig":{ + "shape":"IncrementalRunConfig", + "documentation":"

An object which defines an incremental run type and has only incrementalRunType as a field.

" + }, + "inputSourceConfig":{ + "shape":"InputSourceConfig", + "documentation":"

A list of InputSource objects, which have the fields InputSourceARN and SchemaName.

" + }, + "outputSourceConfig":{ + "shape":"OutputSourceConfig", + "documentation":"

A list of OutputSource objects, each of which contains fields OutputS3Path, ApplyNormalization, and Output.

" + }, + "resolutionTechniques":{ + "shape":"ResolutionTechniques", + "documentation":"

An object which defines the resolutionType and the ruleBasedProperties

" + }, + "roleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.

" + }, + "workflowName":{ + "shape":"EntityName", + "documentation":"

The name of the workflow.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The input fails to satisfy the constraints specified by AWS Entity Resolution. HTTP Status Code: 400

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "VeniceGlobalArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-us-gov|aws-cn):(entityresolution):.*:[0-9]+:((schemamapping|matchingworkflow)/[a-zA-Z0-9_-]+)$" + } + }, + "documentation":"

Welcome to the AWS Entity Resolution API Reference.

AWS Entity Resolution is an AWS service that provides pre-configured entity resolution capabilities that enable developers and analysts at advertising and marketing companies to build an accurate and complete view of their consumers.

With AWS Entity Resolution, you have the ability to match source records containing consumer identifiers, such as name, email address, and phone number. This holds true even when these records have incomplete or conflicting identifiers. For example, AWS Entity Resolution can effectively match a source record from a customer relationship management (CRM) system, which includes account information like first name, last name, postal address, phone number, and email address, with a source record from a marketing system containing campaign information, such as username and email address.

To learn more about AWS Entity Resolution concepts, procedures, and best practices, see the AWS Entity Resolution User Guide.

" +} diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index 3186e05aee1..a45f5847df6 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/evidently/pom.xml b/services/evidently/pom.xml index 50817fdc989..acc25afde89 100644 --- a/services/evidently/pom.xml +++ b/services/evidently/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT evidently AWS Java SDK :: Services :: Evidently diff --git a/services/finspace/pom.xml b/services/finspace/pom.xml index 1199d996c81..ec71cb652e9 100644 --- a/services/finspace/pom.xml +++ b/services/finspace/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT finspace AWS Java SDK :: Services :: Finspace diff --git a/services/finspacedata/pom.xml b/services/finspacedata/pom.xml index 5c0d3a409a5..28fa3c4089c 100644 --- a/services/finspacedata/pom.xml +++ b/services/finspacedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT finspacedata AWS Java SDK :: Services :: Finspace Data diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index f6aa21c3fb1..9fc57ac1a88 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/fis/pom.xml b/services/fis/pom.xml index f718fbcadda..7d054d67e15 100644 --- a/services/fis/pom.xml +++ b/services/fis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT fis AWS Java SDK :: Services :: Fis diff --git a/services/fms/pom.xml b/services/fms/pom.xml index 1acbe1a9ab6..a6860a565ab 100644 --- a/services/fms/pom.xml +++ 
b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index d413a4fb275..b2af7ebd8da 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index e6c260e861a..206ab67690b 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 0c921c098db..78c54254c90 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index 216974775e7..eb97d843d2d 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/fsx/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/fsx/src/main/resources/codegen-resources/endpoint-rule-set.json index b5cc1311213..54ee7440d48 100644 --- a/services/fsx/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/fsx/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - 
true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://fsx-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://fsx-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://fsx-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://fsx-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true 
+ ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://fsx.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://fsx.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://fsx.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://fsx.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json b/services/fsx/src/main/resources/codegen-resources/service-2.json index 3f392d54c8e..131e139cb34 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ 
-43,7 +43,7 @@ {"shape":"DataRepositoryTaskEnded"}, {"shape":"InternalServerError"} ], - "documentation":"

Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel a task, Amazon FSx does the following.

  • Any files that FSx has already exported are not reverted.

  • FSx continues to export any files that are \"in-flight\" when the cancel operation is received.

  • FSx does not export any files that have not yet been exported.

", + "documentation":"

Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel an export task, Amazon FSx does the following.

  • Any files that FSx has already exported are not reverted.

  • FSx continues to export any files that are in-flight when the cancel operation is received.

  • FSx does not export any files that have not yet been exported.

For a release task, Amazon FSx will stop releasing files upon cancellation. Any files that have already been released will remain in the released state.

", "idempotent":true }, "CopyBackup":{ @@ -127,7 +127,7 @@ {"shape":"InternalServerError"}, {"shape":"DataRepositoryTaskExecuting"} ], - "documentation":"

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

", + "documentation":"

Creates an Amazon FSx for Lustre data repository task. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system.

You use import and export data repository tasks to perform bulk operations between your FSx for Lustre file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository.

You use release data repository tasks to release data from your file system for files that are archived to S3. The metadata of released files remains on the file system so users or applications can still access released files by reading the files again, which will restore data from Amazon S3 to the FSx for Lustre file system.

To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

", "idempotent":true }, "CreateFileCache":{ @@ -692,7 +692,7 @@ {"shape":"MissingFileSystemConfiguration"}, {"shape":"ServiceLimitExceeded"} ], - "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For FSx for Windows File Server file systems, you can update the following properties:

  • AuditLogConfiguration

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • SelfManagedActiveDirectoryConfiguration

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

For FSx for Lustre file systems, you can update the following properties:

  • AutoImportPolicy

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • DataCompressionType

  • LogConfiguration

  • LustreRootSquashConfiguration

  • StorageCapacity

  • WeeklyMaintenanceStartTime

For FSx for ONTAP file systems, you can update the following properties:

  • AddRouteTableIds

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • DiskIopsConfiguration

  • FsxAdminPassword

  • RemoveRouteTableIds

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

For FSx for OpenZFS file systems, you can update the following properties:

  • AutomaticBackupRetentionDays

  • CopyTagsToBackups

  • CopyTagsToVolumes

  • DailyAutomaticBackupStartTime

  • DiskIopsConfiguration

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

" + "documentation":"

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For FSx for Windows File Server file systems, you can update the following properties:

  • AuditLogConfiguration

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • SelfManagedActiveDirectoryConfiguration

  • StorageCapacity

  • StorageType

  • ThroughputCapacity

  • DiskIopsConfiguration

  • WeeklyMaintenanceStartTime

For FSx for Lustre file systems, you can update the following properties:

  • AutoImportPolicy

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • DataCompressionType

  • LogConfiguration

  • LustreRootSquashConfiguration

  • StorageCapacity

  • WeeklyMaintenanceStartTime

For FSx for ONTAP file systems, you can update the following properties:

  • AddRouteTableIds

  • AutomaticBackupRetentionDays

  • DailyAutomaticBackupStartTime

  • DiskIopsConfiguration

  • FsxAdminPassword

  • RemoveRouteTableIds

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

For FSx for OpenZFS file systems, you can update the following properties:

  • AutomaticBackupRetentionDays

  • CopyTagsToBackups

  • CopyTagsToVolumes

  • DailyAutomaticBackupStartTime

  • DiskIopsConfiguration

  • StorageCapacity

  • ThroughputCapacity

  • WeeklyMaintenanceStartTime

" }, "UpdateSnapshot":{ "name":"UpdateSnapshot", @@ -846,7 +846,7 @@ }, "AdministrativeActionType":{ "type":"string", - "documentation":"

Describes the type of administrative action, as follows:

  • FILE_SYSTEM_UPDATE - A file system update administrative action initiated from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).

  • STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's storage capacity has been completed successfully, a STORAGE_OPTIMIZATION task starts.

    • For Windows and ONTAP, storage optimization is the process of migrating the file system data to newer larger disks.

    • For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide, Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide, and Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

  • FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new Domain Name System (DNS) alias with the file system. For more information, see AssociateFileSystemAliases.

  • FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system. For more information, see DisassociateFileSystemAliases.

  • VOLUME_UPDATE - A volume update to an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateVolume), or CLI (update-volume).

  • VOLUME_RESTORE - An Amazon FSx for OpenZFS volume is returned to the state saved by the specified snapshot, initiated from an API (RestoreVolumeFromSnapshot) or CLI (restore-volume-from-snapshot).

  • SNAPSHOT_UPDATE - A snapshot update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateSnapshot), or CLI (update-snapshot).

  • RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System (NFS) V3 locks on an Amazon FSx for OpenZFS file system.

", + "documentation":"

Describes the type of administrative action, as follows:

  • FILE_SYSTEM_UPDATE - A file system update administrative action initiated from the Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).

  • THROUGHPUT_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a THROUGHPUT_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When THROUGHPUT_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing throughput capacity in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's storage capacity has been completed successfully, a STORAGE_OPTIMIZATION task starts.

    • For Windows and ONTAP, storage optimization is the process of migrating the file system data to newer larger disks.

    • For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide, Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide, and Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.

  • FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new Domain Name System (DNS) alias with the file system. For more information, see AssociateFileSystemAliases.

  • FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system. For more information, see DisassociateFileSystemAliases.

  • IOPS_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, an IOPS_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When IOPS_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more information, see Managing provisioned SSD IOPS in the Amazon FSx for Windows File Server User Guide.

  • STORAGE_TYPE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase a file system's throughput capacity has been completed successfully, a STORAGE_TYPE_OPTIMIZATION task starts.

    You can track the storage-optimization progress using the ProgressPercent property. When STORAGE_TYPE_OPTIMIZATION has been completed successfully, the parent FILE_SYSTEM_UPDATE action status changes to COMPLETED.

  • VOLUME_UPDATE - A volume update to an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateVolume), or CLI (update-volume).

  • VOLUME_RESTORE - An Amazon FSx for OpenZFS volume is returned to the state saved by the specified snapshot, initiated from an API (RestoreVolumeFromSnapshot) or CLI (restore-volume-from-snapshot).

  • SNAPSHOT_UPDATE - A snapshot update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateSnapshot), or CLI (update-snapshot).

  • RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System (NFS) V3 locks on an Amazon FSx for OpenZFS file system.

", "enum":[ "FILE_SYSTEM_UPDATE", "STORAGE_OPTIMIZATION", @@ -855,7 +855,10 @@ "VOLUME_UPDATE", "SNAPSHOT_UPDATE", "RELEASE_NFS_V3_LOCKS", - "VOLUME_RESTORE" + "VOLUME_RESTORE", + "THROUGHPUT_OPTIMIZATION", + "IOPS_OPTIMIZATION", + "STORAGE_TYPE_OPTIMIZATION" ] }, "AdministrativeActions":{ @@ -1352,11 +1355,11 @@ "members":{ "Type":{ "shape":"DataRepositoryTaskType", - "documentation":"

Specifies the type of data repository task to create.

" + "documentation":"

Specifies the type of data repository task to create.

  • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

  • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

  • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that are archived and that meet your specified release criteria.

  • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

" }, "Paths":{ "shape":"DataRepositoryTaskPaths", - "documentation":"

A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails.

  • For export tasks, the list contains paths on the Amazon FSx file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1.

  • For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the Amazon FSx file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional).

" + "documentation":"

A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all archived files that meet the last accessed time criteria (for release tasks).

  • For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1.

  • For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional).

  • For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release archived files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all archived files in the file system, specify a forward slash (/) as the path.

    A file must also meet the last accessed time criteria specified in for the file to be released.

" }, "FileSystemId":{"shape":"FileSystemId"}, "Report":{ @@ -1371,6 +1374,10 @@ "CapacityToRelease":{ "shape":"CapacityToRelease", "documentation":"

Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.

" + }, + "ReleaseConfiguration":{ + "shape":"ReleaseConfiguration", + "documentation":"

The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system.

" } } }, @@ -1650,7 +1657,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

  • SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MBps. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available.

  • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.

For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

" + "documentation":"

Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

  • MULTI_AZ_1- Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). Multi_AZ_1 is available in the following Amazon Web Services Regions:

  • SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available.

  • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.

For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

" }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", @@ -1661,6 +1668,18 @@ "RootVolumeConfiguration":{ "shape":"OpenZFSCreateRootVolumeConfiguration", "documentation":"

The configuration Amazon FSx uses when creating the root value of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume.

" + }, + "PreferredSubnetId":{ + "shape":"SubnetId", + "documentation":"

Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.

" + }, + "EndpointIpAddressRange":{ + "shape":"IpAddressRange", + "documentation":"

(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.

" + }, + "RouteTableIds":{ + "shape":"RouteTableIds", + "documentation":"

(Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

" } }, "documentation":"

The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.

" @@ -1774,6 +1793,10 @@ "AuditLogConfiguration":{ "shape":"WindowsAuditLogCreateConfiguration", "documentation":"

The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.

" + }, + "DiskIopsConfiguration":{ + "shape":"DiskIopsConfiguration", + "documentation":"

The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for Windows file system. By default, Amazon FSx automatically provisions 3 IOPS per GiB of storage capacity. You can provision additional IOPS per GiB of storage, up to the maximum limit associated with your chosen throughput capacity.

" } }, "documentation":"

The configuration object for the Microsoft Windows file system used in CreateFileSystem and CreateFileSystemFromBackup operations.

" @@ -1901,7 +1924,7 @@ }, "PrivilegedDelete":{ "shape":"PrivilegedDelete", - "documentation":"

Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete WORM files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state. If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default value is DISABLED.

For more information, see Privileged delete.

" + "documentation":"

Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete WORM files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state. If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default value is DISABLED.

For more information, see Privileged delete.

" }, "RetentionPeriod":{ "shape":"SnaplockRetentionPeriod", @@ -1909,7 +1932,7 @@ }, "SnaplockType":{ "shape":"SnaplockType", - "documentation":"

Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. You can choose one of the following retention modes:

  • COMPLIANCE: Files transitioned to write once, read many (WORM) on a Compliance volume can't be deleted until their retention periods expire. This retention mode is used to address government or industry-specific mandates or to protect against ransomware attacks. For more information, see SnapLock Compliance.

  • ENTERPRISE: Files transitioned to WORM on an Enterprise volume can be deleted by authorized users before their retention periods expire using privileged delete. This retention mode is used to advance an organization's data integrity and internal compliance or to test retention settings before using SnapLock Compliance. For more information, see SnapLock Enterprise.

" + "documentation":"

Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. You can choose one of the following retention modes:

  • COMPLIANCE: Files transitioned to write once, read many (WORM) on a Compliance volume can't be deleted until their retention periods expire. This retention mode is used to address government or industry-specific mandates or to protect against ransomware attacks. For more information, see SnapLock Compliance.

  • ENTERPRISE: Files transitioned to WORM on an Enterprise volume can be deleted by authorized users before their retention periods expire using privileged delete. This retention mode is used to advance an organization's data integrity and internal compliance or to test retention settings before using SnapLock Compliance. For more information, see SnapLock Enterprise.

" }, "VolumeAppendModeEnabled":{ "shape":"Flag", @@ -2240,7 +2263,7 @@ }, "Type":{ "shape":"DataRepositoryTaskType", - "documentation":"

The type of data repository task.

  • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

  • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

  • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

  • RELEASE_DATA_FROM_FILESYSTEM tasks are not supported.

" + "documentation":"

The type of data repository task.

  • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

  • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

  • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that are archived and that meet your specified release criteria.

  • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

" }, "CreationTime":{"shape":"CreationTime"}, "StartTime":{ @@ -2277,9 +2300,13 @@ "FileCacheId":{ "shape":"FileCacheId", "documentation":"

The system-generated, unique ID of the cache.

" + }, + "ReleaseConfiguration":{ + "shape":"ReleaseConfiguration", + "documentation":"

The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system.

" } }, - "documentation":"

A description of the data repository task. You use data repository tasks to perform bulk transfer operations between an Amazon FSx for Lustre file system and a linked data repository. An Amazon File Cache resource uses a task to automatically release files from the cache.

" + "documentation":"

A description of the data repository task.

  • You use import and export data repository tasks to perform bulk transfer operations between an Amazon FSx for Lustre file system and a linked data repository.

  • You use release data repository tasks to release archived files from your Amazon FSx for Lustre file system.

  • An Amazon File Cache resource uses a task to automatically release files from the cache.

To learn more about data repository tasks, see Data Repository Tasks.

" }, "DataRepositoryTaskEnded":{ "type":"structure", @@ -2712,7 +2739,7 @@ "FinalBackupTags":{"shape":"Tags"}, "BypassSnaplockEnterpriseRetention":{ "shape":"Flag", - "documentation":"

Setting this to true allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. The IAM permission fsx:BypassSnaplockEnterpriseRetention is also required to delete SnapLock Enterprise volumes with unexpired WORM files. The default value is false.

For more information, see Deleting a SnapLock volume .

" + "documentation":"

Setting this to true allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume with unexpired write once, read many (WORM) files. The IAM permission fsx:BypassSnaplockEnterpriseRetention is also required to delete SnapLock Enterprise volumes with unexpired WORM files. The default value is false.

For more information, see Deleting a SnapLock volume.

" } }, "documentation":"

Use to specify skipping a final backup, adding tags to a final backup, or bypassing the retention period of an FSx for ONTAP SnapLock Enterprise volume when deleting an FSx for ONTAP volume.

" @@ -3088,7 +3115,7 @@ "documentation":"

The total number of SSD IOPS provisioned for the file system.

" } }, - "documentation":"

The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or FSx for OpenZFS file system. By default, Amazon FSx automatically provisions 3 IOPS per GB of storage capacity. You can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how it is was provisioned, or the mode (by the customer or by Amazon FSx).

" + "documentation":"

The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP, Amazon FSx for Windows File Server, or FSx for OpenZFS file system. By default, Amazon FSx automatically provisions 3 IOPS per GB of storage capacity. You can provision additional IOPS per GB of storage. The configuration consists of the total number of provisioned SSD IOPS and how it is was provisioned, or the mode (by the customer or by Amazon FSx).

" }, "DiskIopsConfigurationMode":{ "type":"string", @@ -3110,6 +3137,20 @@ "READ" ] }, + "DurationSinceLastAccess":{ + "type":"structure", + "members":{ + "Unit":{ + "shape":"Unit", + "documentation":"

The unit of time used by the Value parameter to determine if a file can be released, based on when it was last accessed. DAYS is the only supported value. This is a required parameter.

" + }, + "Value":{ + "shape":"Value", + "documentation":"

An integer that represents the minimum amount of time (in days) since a file was last accessed in the file system. Only archived files with a MAX(atime, ctime, mtime) timestamp that is more than this amount of time in the past (relative to the task create time) will be released. The default of Value is 0. This is a required parameter.

If an archived file meets the last accessed time criteria, its file or directory path must also be specified in the Paths parameter of the operation in order for the file to be released.

" + } + }, + "documentation":"

Defines the minimum amount of time since last access for a file to be eligible for release. Only archived files that were last accessed or modified before this point-in-time are eligible to be released from the Amazon FSx for Lustre file system.

" + }, "EndTime":{"type":"timestamp"}, "ErrorMessage":{ "type":"string", @@ -4270,7 +4311,8 @@ "type":"string", "enum":[ "SINGLE_AZ_1", - "SINGLE_AZ_2" + "SINGLE_AZ_2", + "MULTI_AZ_1" ] }, "OpenZFSFileSystemConfiguration":{ @@ -4288,7 +4330,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 SINGLE_AZ_1 and SINGLE_AZ_2.

" + "documentation":"

Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
 MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2.

" }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", @@ -4299,6 +4341,22 @@ "RootVolumeId":{ "shape":"VolumeId", "documentation":"

The ID of the root volume of the OpenZFS file system.

" + }, + "PreferredSubnetId":{ + "shape":"SubnetId", + "documentation":"

Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.

" + }, + "EndpointIpAddressRange":{ + "shape":"IpAddressRange", + "documentation":"

(Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.

" + }, + "RouteTableIds":{ + "shape":"RouteTableIds", + "documentation":"

(Multi-AZ only) The VPC route tables in which your file system's endpoints are created.

" + }, + "EndpointIpAddress":{ + "shape":"IpAddress", + "documentation":"

The IP address of the endpoint that is used to access data or to manage the file system.

" } }, "documentation":"

The configuration for the Amazon FSx for OpenZFS file system.

" @@ -4476,6 +4534,16 @@ "min":1, "pattern":"^[a-z0-9-]{1,20}$" }, + "ReleaseConfiguration":{ + "type":"structure", + "members":{ + "DurationSinceLastAccess":{ + "shape":"DurationSinceLastAccess", + "documentation":"

Defines the point-in-time since an archived file was last accessed, in order for that file to be eligible for release. Only files that were last accessed before this point-in-time are eligible to be released from the file system.

" + } + }, + "documentation":"

The configuration that specifies a minimum amount of time since last access for an archived file to be eligible for release from an Amazon FSx for Lustre file system. Only files that were last accessed before this point-in-time can be released. For example, if you specify a last accessed time criteria of 9 days, only files that were last accessed 9.00001 or more days ago can be released.

Only file data that has been archived can be released. Files that have not yet been archived, such as new or changed files that have not been exported, are not eligible for release. When files are released, their metadata stays on the file system, so they can still be accessed later. Users and applications can access a released file by reading the file again, which restores data from Amazon S3 to the FSx for Lustre file system.

If a file meets the last accessed time criteria, its file or directory path must also be specified with the Paths parameter of the operation in order for the file to be released.

" + }, "ReleaseFileSystemNfsV3LocksRequest":{ "type":"structure", "required":["FileSystemId"], @@ -4815,7 +4883,7 @@ }, "PrivilegedDelete":{ "shape":"PrivilegedDelete", - "documentation":"

Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete write once, read many (WORM) files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state. If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default value is DISABLED.

For more information, see Privileged delete.

" + "documentation":"

Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete write once, read many (WORM) files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state. If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default value is DISABLED.

For more information, see Privileged delete.

" }, "RetentionPeriod":{ "shape":"SnaplockRetentionPeriod", @@ -4823,7 +4891,7 @@ }, "SnaplockType":{ "shape":"SnaplockType", - "documentation":"

Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. You can choose one of the following retention modes:

  • COMPLIANCE: Files transitioned to write once, read many (WORM) on a Compliance volume can't be deleted until their retention periods expire. This retention mode is used to address government or industry-specific mandates or to protect against ransomware attacks. For more information, see SnapLock Compliance.

  • ENTERPRISE: Files transitioned to WORM on an Enterprise volume can be deleted by authorized users before their retention periods expire using privileged delete. This retention mode is used to advance an organization's data integrity and internal compliance or to test retention settings before using SnapLock Compliance. For more information, see SnapLock Enterprise.

" + "documentation":"

Specifies the retention mode of an FSx for ONTAP SnapLock volume. After it is set, it can't be changed. You can choose one of the following retention modes:

  • COMPLIANCE: Files transitioned to write once, read many (WORM) on a Compliance volume can't be deleted until their retention periods expire. This retention mode is used to address government or industry-specific mandates or to protect against ransomware attacks. For more information, see SnapLock Compliance.

  • ENTERPRISE: Files transitioned to WORM on an Enterprise volume can be deleted by authorized users before their retention periods expire using privileged delete. This retention mode is used to advance an organization's data integrity and internal compliance or to test retention settings before using SnapLock Compliance. For more information, see SnapLock Enterprise.

" }, "VolumeAppendModeEnabled":{ "shape":"Flag", @@ -5324,6 +5392,10 @@ "max":36, "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,36}$" }, + "Unit":{ + "type":"string", + "enum":["DAYS"] + }, "UnsupportedOperation":{ "type":"structure", "members":{ @@ -5499,7 +5571,15 @@ "documentation":"

The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second
 (MB/s). Valid values depend on the DeploymentType you choose, as follows:

  • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

  • For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.

" }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, - "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"} + "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"}, + "AddRouteTableIds":{ + "shape":"RouteTableIds", + "documentation":"

(Multi-AZ only) A list of IDs of new virtual private cloud (VPC) route tables to associate (add) with your Amazon FSx for OpenZFS file system.

" + }, + "RemoveRouteTableIds":{ + "shape":"RouteTableIds", + "documentation":"

(Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for OpenZFS file system. You can use the API operation to retrieve the list of VPC route table IDs for a file system.

" + } }, "documentation":"

The configuration updates for an Amazon FSx for OpenZFS file system.

" }, @@ -5529,7 +5609,8 @@ "OpenZFSConfiguration":{ "shape":"UpdateFileSystemOpenZFSConfiguration", "documentation":"

The configuration updates for an FSx for OpenZFS file system.

" - } + }, + "StorageType":{"shape":"StorageType"} }, "documentation":"

The request object for the UpdateFileSystem operation.

" }, @@ -5569,6 +5650,10 @@ "AuditLogConfiguration":{ "shape":"WindowsAuditLogCreateConfiguration", "documentation":"

The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.

" + }, + "DiskIopsConfiguration":{ + "shape":"DiskIopsConfiguration", + "documentation":"

The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for Windows file system. By default, Amazon FSx automatically provisions 3 IOPS per GiB of storage capacity. You can provision additional IOPS per GiB of storage, up to the maximum limit associated with your chosen throughput capacity.

" } }, "documentation":"

Updates the configuration for an existing Amazon FSx for Windows File Server file system. Amazon FSx only overwrites existing properties with non-null values provided in the request.

" @@ -5658,7 +5743,7 @@ }, "PrivilegedDelete":{ "shape":"PrivilegedDelete", - "documentation":"

Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete write once, read many (WORM) files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state. If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default value is DISABLED.

For more information, see Privileged delete.

" + "documentation":"

Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete write once, read many (WORM) files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state. If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default value is DISABLED.

For more information, see Privileged delete.

" }, "RetentionPeriod":{ "shape":"SnaplockRetentionPeriod", @@ -5775,6 +5860,10 @@ } } }, + "Value":{ + "type":"long", + "min":0 + }, "Volume":{ "type":"structure", "members":{ @@ -5951,7 +6040,7 @@ }, "AuditLogDestination":{ "shape":"GeneralARN", - "documentation":"

The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN.

The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehouse delivery stream must begin with the aws-fsx prefix.

The destination ARN (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.

" + "documentation":"

The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN.

The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the aws-fsx prefix.

The destination ARN (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.

" } }, "documentation":"

The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system. For more information, see File access auditing.

" @@ -5973,7 +6062,7 @@ }, "AuditLogDestination":{ "shape":"GeneralARN", - "documentation":"

The Amazon Resource Name (ARN) that specifies the destination of the audit logs.

The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN, with the following requirements:

  • The destination ARN that you provide (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.

  • The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehouse delivery stream must begin with the aws-fsx prefix.

  • If you do not provide a destination in AuditLogDestination, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group.

  • If AuditLogDestination is provided and the resource does not exist, the request will fail with a BadRequest error.

  • If FileAccessAuditLogLevel and FileShareAccessAuditLogLevel are both set to DISABLED, you cannot specify a destination in AuditLogDestination.

" + "documentation":"

The Amazon Resource Name (ARN) that specifies the destination of the audit logs.

The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN, with the following requirements:

  • The destination ARN that you provide (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.

  • The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the aws-fsx prefix.

  • If you do not provide a destination in AuditLogDestination, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group.

  • If AuditLogDestination is provided and the resource does not exist, the request will fail with a BadRequest error.

  • If FileAccessAuditLogLevel and FileShareAccessAuditLogLevel are both set to DISABLED, you cannot specify a destination in AuditLogDestination.

" } }, "documentation":"

The Windows file access auditing configuration used when creating or updating an Amazon FSx for Windows File Server file system.

" @@ -6038,6 +6127,10 @@ "AuditLogConfiguration":{ "shape":"WindowsAuditLogConfiguration", "documentation":"

The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.

" + }, + "DiskIopsConfiguration":{ + "shape":"DiskIopsConfiguration", + "documentation":"

The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for Windows file system. By default, Amazon FSx automatically provisions 3 IOPS per GiB of storage capacity. You can provision additional IOPS per GiB of storage, up to the maximum limit associated with your chosen throughput capacity.

" } }, "documentation":"

The configuration for this Microsoft Windows file system.

" diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 88865cd986e..551541fdcb0 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/gamesparks/pom.xml b/services/gamesparks/pom.xml index b5928b682fc..8ad8c73c539 100644 --- a/services/gamesparks/pom.xml +++ b/services/gamesparks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT gamesparks AWS Java SDK :: Services :: Game Sparks diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index 1987649b4da..3eb80c729a5 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index e5dfa000e03..b9a66177e18 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/globalaccelerator/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/globalaccelerator/src/main/resources/codegen-resources/endpoint-rule-set.json index 66e10dd2e5c..d26fc19a37e 100644 --- a/services/globalaccelerator/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/globalaccelerator/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - 
"ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "parseURL", + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -75,13 +122,17 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { @@ -89,221 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://globalaccelerator-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] }, { "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] - }, + } + ], + "type": "tree", + "rules": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsDualStack" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://globalaccelerator-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://globalaccelerator-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": 
"https://globalaccelerator-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsDualStack" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://globalaccelerator.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://globalaccelerator.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://globalaccelerator.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://globalaccelerator.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/globalaccelerator/src/main/resources/codegen-resources/endpoint-tests.json 
b/services/globalaccelerator/src/main/resources/codegen-resources/endpoint-tests.json index ff1bae97f53..de6333056e7 100644 --- a/services/globalaccelerator/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/globalaccelerator/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,15 +1,280 @@ { "testCases": [ { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://example.com" + "url": "https://globalaccelerator-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": 
"https://globalaccelerator-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", 
+ "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region 
us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://globalaccelerator.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -20,8 +285,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -32,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json b/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json index 0069e90062f..cb7a9c9eda5 100644 --- a/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json +++ b/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json @@ -48,7 +48,7 @@ {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Add endpoints to an endpoint group. The AddEndpoints API operation is the recommended option for adding endpoints. The alternative options are to add endpoints when you create an endpoint group (with the CreateEndpointGroup API) or when you update an endpoint group (with the UpdateEndpointGroup API).

There are two advantages to using AddEndpoints to add endpoints:

  • It's faster, because Global Accelerator only has to resolve the new endpoints that you're adding.

  • It's more convenient, because you don't need to specify all of the current endpoints that are already in the endpoint group in addition to the new endpoints that you want to add.

" + "documentation":"

Add endpoints to an endpoint group. The AddEndpoints API operation is the recommended option for adding endpoints. The alternative options are to add endpoints when you create an endpoint group (with the CreateEndpointGroup API) or when you update an endpoint group (with the UpdateEndpointGroup API).

There are two advantages to using AddEndpoints to add endpoints in Global Accelerator:

  • It's faster, because Global Accelerator only has to resolve the new endpoints that you're adding, rather than resolving new and existing endpoints.

  • It's more convenient, because you don't need to specify the current endpoints that are already in the endpoint group, in addition to the new endpoints that you want to add.

For information about endpoint types and requirements for endpoints that you can add to Global Accelerator, see Endpoints for standard accelerators in the Global Accelerator Developer Guide.

" }, "AdvertiseByoipCidr":{ "name":"AdvertiseByoipCidr", @@ -94,7 +94,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.

" + "documentation":"

Create an accelerator. An accelerator includes one or more listeners that process inbound connections and direct traffic to one or more endpoint groups, each of which includes endpoints, such as Network Load Balancers.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.

" }, "CreateCustomRoutingAccelerator":{ "name":"CreateCustomRoutingAccelerator", @@ -110,7 +110,7 @@ {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instance destinations running in a single or multiple virtual private clouds (VPC) subnet endpoints.

Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive traffic. To enable all destinations to receive traffic, or to specify individual port mappings that can receive traffic, see the AllowCustomRoutingTraffic operation.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.

" + "documentation":"

Create a custom routing accelerator. A custom routing accelerator directs traffic to one of possibly thousands of Amazon EC2 instance destinations running in a single or multiple virtual private clouds (VPC) subnet endpoints.

Be aware that, by default, all destination EC2 instances in a VPC subnet endpoint cannot receive traffic. To enable all destinations to receive traffic, or to specify individual port mappings that can receive traffic, see the AllowCustomRoutingTraffic operation.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.

" }, "CreateCustomRoutingEndpointGroup":{ "name":"CreateCustomRoutingEndpointGroup", @@ -166,7 +166,7 @@ {"shape":"LimitExceededException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one Amazon Web Services Region. A resource must be valid and active when you add it as an endpoint.

" + "documentation":"

Create an endpoint group for the specified listener. An endpoint group is a collection of endpoints in one Amazon Web Services Region. A resource must be valid and active when you add it as an endpoint.

For more information about endpoint types and requirements for endpoints that you can add to Global Accelerator, see Endpoints for standard accelerators in the Global Accelerator Developer Guide.

" }, "CreateListener":{ "name":"CreateListener", @@ -678,7 +678,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidArgumentException"} ], - "documentation":"

Update an accelerator.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.

" + "documentation":"

Update an accelerator to make changes, such as the following:

  • Change the name of the accelerator.

  • Disable the accelerator so that it no longer accepts or routes traffic, or so that you can delete it.

  • Enable the accelerator, if it is disabled.

  • Change the IP address type to dual-stack if it is IPv4, or change the IP address type to IPv4 if it's dual-stack.

Be aware that static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete the accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.

" }, "UpdateAcceleratorAttributes":{ "name":"UpdateAcceleratorAttributes", @@ -3094,5 +3094,5 @@ } } }, - "documentation":"Global Accelerator

This is the Global Accelerator API Reference. This guide is for developers who need detailed information about Global Accelerator API actions, data types, and errors. For more information about Global Accelerator features, see the Global Accelerator Developer Guide.

Global Accelerator is a service in which you create accelerators to improve the performance of your applications for local and global users. Depending on the type of accelerator you choose, you can gain additional benefits.

  • By using a standard accelerator, you can improve availability of your internet applications that are used by a global audience. With a standard accelerator, Global Accelerator directs traffic to optimal endpoints over the Amazon Web Services global network.

  • For other scenarios, you might choose a custom routing accelerator. With a custom routing accelerator, you can use application logic to directly map one or more users to a specific endpoint among many endpoints.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on AWS CLI commands.

By default, Global Accelerator provides you with static IP addresses that you associate with your accelerator. The static IP addresses are anycast from the Amazon Web Services edge network. For IPv4, Global Accelerator provides two static IPv4 addresses. For dual-stack, Global Accelerator provides a total of four addresses: two static IPv4 addresses and two static IPv6 addresses. With a standard accelerator for IPv4, instead of using the addresses that Global Accelerator provides, you can configure these entry points to be IPv4 addresses from your own IP address ranges that you bring toGlobal Accelerator (BYOIP).

For a standard accelerator, they distribute incoming application traffic across multiple endpoint resources in multiple Amazon Web Services Regions , which increases the availability of your applications. Endpoints for standard accelerators can be Network Load Balancers, Application Load Balancers, Amazon EC2 instances, or Elastic IP addresses that are located in one Amazon Web Services Region or multiple Amazon Web Services Regions. For custom routing accelerators, you map traffic that arrives to the static IP addresses to specific Amazon EC2 servers in endpoints that are virtual private cloud (VPC) subnets.

The static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete an accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them. You can use IAM policies like tag-based permissions with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information, see Tag-based policies.

For standard accelerators, Global Accelerator uses the Amazon Web Services global network to route traffic to the optimal regional endpoint based on health, client location, and policies that you configure. The service reacts instantly to changes in health or configuration to ensure that internet traffic from clients is always directed to healthy endpoints.

For more information about understanding and using Global Accelerator, see the Global Accelerator Developer Guide.

" + "documentation":"Global Accelerator

This is the Global Accelerator API Reference. This guide is for developers who need detailed information about Global Accelerator API actions, data types, and errors. For more information about Global Accelerator features, see the Global Accelerator Developer Guide.

Global Accelerator is a service in which you create accelerators to improve the performance of your applications for local and global users. Depending on the type of accelerator you choose, you can gain additional benefits.

  • By using a standard accelerator, you can improve availability of your internet applications that are used by a global audience. With a standard accelerator, Global Accelerator directs traffic to optimal endpoints over the Amazon Web Services global network.

  • For other scenarios, you might choose a custom routing accelerator. With a custom routing accelerator, you can use application logic to directly map one or more users to a specific endpoint among many endpoints.

Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands.

By default, Global Accelerator provides you with static IP addresses that you associate with your accelerator. The static IP addresses are anycast from the Amazon Web Services edge network. For IPv4, Global Accelerator provides two static IPv4 addresses. For dual-stack, Global Accelerator provides a total of four addresses: two static IPv4 addresses and two static IPv6 addresses. With a standard accelerator for IPv4, instead of using the addresses that Global Accelerator provides, you can configure these entry points to be IPv4 addresses from your own IP address ranges that you bring to Global Accelerator (BYOIP).

For a standard accelerator, they distribute incoming application traffic across multiple endpoint resources in multiple Amazon Web Services Regions, which increases the availability of your applications. Endpoints for standard accelerators can be Network Load Balancers, Application Load Balancers, Amazon EC2 instances, or Elastic IP addresses that are located in one Amazon Web Services Region or multiple Amazon Web Services Regions. For custom routing accelerators, you map traffic that arrives to the static IP addresses to specific Amazon EC2 servers in endpoints that are virtual private cloud (VPC) subnets.

The static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete an accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them. You can use IAM policies like tag-based permissions with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information, see Tag-based policies.

For standard accelerators, Global Accelerator uses the Amazon Web Services global network to route traffic to the optimal regional endpoint based on health, client location, and policies that you configure. The service reacts instantly to changes in health or configuration to ensure that internet traffic from clients is always directed to healthy endpoints.

For more information about understanding and using Global Accelerator, see the Global Accelerator Developer Guide.

" } diff --git a/services/glue/pom.xml b/services/glue/pom.xml index 3e0ebf74fa6..b9957566262 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/glue/src/main/resources/codegen-resources/endpoint-rule-set.json index 477f7b05b76..d1bd27f670a 100644 --- a/services/glue/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/glue/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } 
], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://glue-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://glue-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": 
"tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://glue-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://glue-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://glue.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://glue.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://glue.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + 
"error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://glue.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 354d9e8d980..5084c09af87 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -3668,7 +3668,7 @@ }, "Data":{ "shape":"AmazonRedshiftNodeData", - "documentation":"

Specifies the data of the Amazon Reshift target node.

" + "documentation":"

Specifies the data of the Amazon Redshift target node.

" }, "Inputs":{ "shape":"OneInput", @@ -5377,6 +5377,18 @@ "EvaluateDataQualityMultiFrame":{ "shape":"EvaluateDataQualityMultiFrame", "documentation":"

Specifies your data quality evaluation criteria. Allows multiple input data and returns a collection of Dynamic Frames.

" + }, + "Recipe":{ + "shape":"Recipe", + "documentation":"

Specifies a Glue DataBrew recipe node.

" + }, + "SnowflakeSource":{ + "shape":"SnowflakeSource", + "documentation":"

Specifies a Snowflake data source.

" + }, + "SnowflakeTarget":{ + "shape":"SnowflakeTarget", + "documentation":"

Specifies a target that writes to a Snowflake data source.

" } }, "documentation":"

CodeGenConfigurationNode enumerates all valid Node types. One and only one of its member variables can be populated.

" @@ -5831,7 +5843,7 @@ }, "ConnectionProperties":{ "shape":"ConnectionProperties", - "documentation":"

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

  • JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.

  • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

  • JDBC_ENGINE - The name of the JDBC engine to use.

  • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

  • CONFIG_FILES - (Reserved for future use.)

  • INSTANCE_ID - The instance ID to use.

  • JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

  • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.

  • CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.

  • SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.

  • CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

  • CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

  • SECRET_ID - The secret ID used for the secret manager of credentials.

  • CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.

  • CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.

  • CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.

  • KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.

  • KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".

  • KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.

  • KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".

  • KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).

  • KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).

  • KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).

  • ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).

  • ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", or \"AWS_MSK_IAM\". These are the supported SASL Mechanisms.

  • KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.

  • KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.

  • ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.

  • KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.

  • KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.

  • KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos princial used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.

" + "documentation":"

These key-value pairs define parameters for the connection:

  • HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.

  • PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.

  • USER_NAME - The name under which to log in to the database. The value string for USER_NAME is \"USERNAME\".

  • PASSWORD - A password, if one is used, for the user name.

  • ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

  • JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use.

  • JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

  • JDBC_ENGINE - The name of the JDBC engine to use.

  • JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

  • CONFIG_FILES - (Reserved for future use.)

  • INSTANCE_ID - The instance ID to use.

  • JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

  • JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false.

  • CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.

  • SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.

  • CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

  • CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

  • SECRET_ID - The secret ID used for the secret manager of credentials.

  • CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.

  • CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.

  • CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.

  • KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.

  • KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".

  • KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.

  • KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".

  • KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).

  • KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).

  • KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).

  • ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).

  • ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", or \"AWS_MSK_IAM\". These are the supported SASL Mechanisms.

  • KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.

  • KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.

  • ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).

  • KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.

  • KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.

  • KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.

  • KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.

  • KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.

" }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", @@ -5946,7 +5958,16 @@ "SECRET_ID", "CONNECTOR_URL", "CONNECTOR_TYPE", - "CONNECTOR_CLASS_NAME" + "CONNECTOR_CLASS_NAME", + "KAFKA_SASL_MECHANISM", + "KAFKA_SASL_SCRAM_USERNAME", + "KAFKA_SASL_SCRAM_PASSWORD", + "KAFKA_SASL_SCRAM_SECRETS_ARN", + "ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD", + "KAFKA_SASL_GSSAPI_KEYTAB", + "KAFKA_SASL_GSSAPI_KRB5_CONF", + "KAFKA_SASL_GSSAPI_SERVICE", + "KAFKA_SASL_GSSAPI_PRINCIPAL" ] }, "ConnectionType":{ @@ -6314,6 +6335,10 @@ "IcebergTargets":{ "shape":"IcebergTargetList", "documentation":"

Specifies Apache Iceberg data store targets.

" + }, + "HudiTargets":{ + "shape":"HudiTargetList", + "documentation":"

Specifies Apache Hudi data store targets.

" } }, "documentation":"

Specifies data stores to crawl.

" @@ -6901,7 +6926,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" }, "CodeGenConfigurationNodes":{ "shape":"CodeGenConfigurationNodes", @@ -7325,7 +7350,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated to use for the session. Accepts a value of Standard, G.1X, G.2X, or G.025X.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" }, "SecurityConfiguration":{ "shape":"NameString", @@ -12654,6 +12679,28 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "HudiTarget":{ + "type":"structure", + "members":{ + "Paths":{ + "shape":"PathList", + "documentation":"

An array of Amazon S3 location strings for Hudi, each indicating the root folder with which the metadata files for a Hudi table reside. The Hudi folder may be located in a child folder of the root folder.

The crawler will scan all folders underneath a path for a Hudi folder.

" + }, + "ConnectionName":{ + "shape":"ConnectionName", + "documentation":"

The name of the connection to use to connect to the Hudi target. If your Hudi files are stored in buckets that require VPC authorization, you can set their connection properties here.

" + }, + "Exclusions":{ + "shape":"PathList", + "documentation":"

A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.

" + }, + "MaximumTraversalDepth":{ + "shape":"NullableInteger", + "documentation":"

The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time.

" + } + }, + "documentation":"

Specifies an Apache Hudi data source.

" + }, "HudiTargetCompressionType":{ "type":"string", "enum":[ @@ -12663,6 +12710,10 @@ "snappy" ] }, + "HudiTargetList":{ + "type":"list", + "member":{"shape":"HudiTarget"} + }, "IcebergInput":{ "type":"structure", "required":["MetadataOperation"], @@ -13119,7 +13170,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -13319,7 +13370,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -13425,7 +13476,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -15998,6 +16049,52 @@ "disabled" ] }, + "Recipe":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "RecipeReference" + ], + "members":{ + "Name":{ + "shape":"NodeName", + "documentation":"

The name of the Glue Studio node.

" + }, + "Inputs":{ + "shape":"OneInput", + "documentation":"

The nodes that are inputs to the recipe node, identified by id.

" + }, + "RecipeReference":{ + "shape":"RecipeReference", + "documentation":"

A reference to the DataBrew recipe used by the node.

" + } + }, + "documentation":"

A Glue Studio node that uses a Glue DataBrew recipe in Glue jobs.

" + }, + "RecipeReference":{ + "type":"structure", + "required":[ + "RecipeArn", + "RecipeVersion" + ], + "members":{ + "RecipeArn":{ + "shape":"EnclosedInStringProperty", + "documentation":"

The ARN of the DataBrew recipe.

" + }, + "RecipeVersion":{ + "shape":"RecipeVersion", + "documentation":"

The RecipeVersion of the DataBrew recipe.

" + } + }, + "documentation":"

A reference to a Glue DataBrew recipe.

" + }, + "RecipeVersion":{ + "type":"string", + "max":16, + "min":1 + }, "RecordsCount":{ "type":"long", "box":true @@ -17753,6 +17850,140 @@ }, "documentation":"

Specifies skewed values in a table. Skewed values are those that occur with very high frequency.

" }, + "SnowflakeNodeData":{ + "type":"structure", + "members":{ + "SourceType":{ + "shape":"GenericLimitedString", + "documentation":"

Specifies how retrieved data is specified. Valid values: \"table\", \"query\".

" + }, + "Connection":{ + "shape":"Option", + "documentation":"

Specifies a Glue Data Catalog Connection to a Snowflake endpoint.

" + }, + "Schema":{ + "shape":"GenericString", + "documentation":"

Specifies a Snowflake database schema for your node to use.

" + }, + "Table":{ + "shape":"GenericString", + "documentation":"

Specifies a Snowflake table for your node to use.

" + }, + "Database":{ + "shape":"GenericString", + "documentation":"

Specifies a Snowflake database for your node to use.

" + }, + "TempDir":{ + "shape":"EnclosedInStringProperty", + "documentation":"

Not currently used.

" + }, + "IamRole":{ + "shape":"Option", + "documentation":"

Not currently used.

" + }, + "AdditionalOptions":{ + "shape":"AdditionalOptions", + "documentation":"

Specifies additional options passed to the Snowflake connector. If options are specified elsewhere in this node, this will take precedence.

" + }, + "SampleQuery":{ + "shape":"GenericString", + "documentation":"

A SQL string used to retrieve data with the query sourcetype.

" + }, + "PreAction":{ + "shape":"GenericString", + "documentation":"

A SQL string run before the Snowflake connector performs its standard actions.

" + }, + "PostAction":{ + "shape":"GenericString", + "documentation":"

A SQL string run after the Snowflake connector performs its standard actions.

" + }, + "Action":{ + "shape":"GenericString", + "documentation":"

Specifies what action to take when writing to a table with preexisting data. Valid values: append, merge, truncate, drop.

" + }, + "Upsert":{ + "shape":"BooleanValue", + "documentation":"

Used when Action is append. Specifies the resolution behavior when a row already exists. If true, preexisting rows will be updated. If false, those rows will be inserted.

" + }, + "MergeAction":{ + "shape":"GenericLimitedString", + "documentation":"

Specifies a merge action. Valid values: simple, custom. If simple, merge behavior is defined by MergeWhenMatched and MergeWhenNotMatched. If custom, defined by MergeClause.

" + }, + "MergeWhenMatched":{ + "shape":"GenericLimitedString", + "documentation":"

Specifies how to resolve records that match preexisting data when merging. Valid values: update, delete.

" + }, + "MergeWhenNotMatched":{ + "shape":"GenericLimitedString", + "documentation":"

Specifies how to process records that do not match preexisting data when merging. Valid values: insert, none.

" + }, + "MergeClause":{ + "shape":"GenericString", + "documentation":"

A SQL statement that specifies a custom merge behavior.

" + }, + "StagingTable":{ + "shape":"GenericString", + "documentation":"

The name of a staging table used when performing merge or upsert append actions. Data is written to this table, then moved to table by a generated postaction.

" + }, + "SelectedColumns":{ + "shape":"OptionList", + "documentation":"

Specifies the columns combined to identify a record when detecting matches for merges and upserts. A list of structures with value, label and description keys. Each structure describes a column.

" + }, + "AutoPushdown":{ + "shape":"BooleanValue", + "documentation":"

Specifies whether automatic query pushdown is enabled. If pushdown is enabled, then when a query is run on Spark, if part of the query can be \"pushed down\" to the Snowflake server, it is pushed down. This improves performance of some queries.

" + }, + "TableSchema":{ + "shape":"OptionList", + "documentation":"

Manually defines the target schema for the node. A list of structures with value , label and description keys. Each structure defines a column.

" + } + }, + "documentation":"

Specifies configuration for Snowflake nodes in Glue Studio.

" + }, + "SnowflakeSource":{ + "type":"structure", + "required":[ + "Name", + "Data" + ], + "members":{ + "Name":{ + "shape":"NodeName", + "documentation":"

The name of the Snowflake data source.

" + }, + "Data":{ + "shape":"SnowflakeNodeData", + "documentation":"

Configuration for the Snowflake data source.

" + }, + "OutputSchemas":{ + "shape":"GlueSchemas", + "documentation":"

Specifies user-defined schemas for your output data.

" + } + }, + "documentation":"

Specifies a Snowflake data source.

" + }, + "SnowflakeTarget":{ + "type":"structure", + "required":[ + "Name", + "Data" + ], + "members":{ + "Name":{ + "shape":"NodeName", + "documentation":"

The name of the Snowflake target.

" + }, + "Data":{ + "shape":"SnowflakeNodeData", + "documentation":"

Specifies the data of the Snowflake target node.

" + }, + "Inputs":{ + "shape":"OneInput", + "documentation":"

The nodes that are inputs to the data target.

" + } + }, + "documentation":"

Specifies a Snowflake target.

" + }, "Sort":{ "type":"string", "enum":[ @@ -18277,7 +18508,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" }, "NumberOfWorkers":{ "shape":"NullableInteger", diff --git a/services/grafana/pom.xml b/services/grafana/pom.xml index 7944d703481..0483115834c 100644 --- a/services/grafana/pom.xml +++ b/services/grafana/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT grafana AWS Java SDK :: Services :: Grafana diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index baca851faf5..16e9e3af33a 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index 6378423be73..c3416ac7364 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index 85e8a7ed00e..ad3fe417bf3 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index f69316420ee..f6278bcd72a 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 guardduty diff --git a/services/guardduty/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/guardduty/src/main/resources/codegen-resources/endpoint-rule-set.json index bd3e1c6f766..0742b358787 100644 --- a/services/guardduty/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/guardduty/src/main/resources/codegen-resources/endpoint-rule-set.json @@ 
-58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - 
"ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://guardduty-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://guardduty-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://guardduty.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://guardduty-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - 
"headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://guardduty.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://guardduty-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://guardduty.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://guardduty.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://guardduty.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + 
"endpoint": { + "url": "https://guardduty.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/guardduty/src/main/resources/codegen-resources/service-2.json b/services/guardduty/src/main/resources/codegen-resources/service-2.json index e40fa46fa6f..5869936783c 100644 --- a/services/guardduty/src/main/resources/codegen-resources/service-2.json +++ b/services/guardduty/src/main/resources/codegen-resources/service-2.json @@ -797,7 +797,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, threat intel sets, publishing destination, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource.

" + "documentation":"

Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, threat intel sets, and publishing destination, with a limit of 50 tags per each resource. When invoked, this operation returns all assigned tags for a given resource.

" }, "ListThreatIntelSets":{ "name":"ListThreatIntelSets", @@ -2021,7 +2021,7 @@ }, "FindingCriteria":{ "shape":"FindingCriteria", - "documentation":"

Represents the criteria to be used in the filter for querying findings.

You can only use the following attributes to query findings:

  • accountId

  • region

  • id

  • resource.accessKeyDetails.accessKeyId

  • resource.accessKeyDetails.principalId

  • resource.accessKeyDetails.userName

  • resource.accessKeyDetails.userType

  • resource.instanceDetails.iamInstanceProfile.id

  • resource.instanceDetails.imageId

  • resource.instanceDetails.instanceId

  • resource.instanceDetails.outpostArn

  • resource.instanceDetails.networkInterfaces.ipv6Addresses

  • resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress

  • resource.instanceDetails.networkInterfaces.publicDnsName

  • resource.instanceDetails.networkInterfaces.publicIp

  • resource.instanceDetails.networkInterfaces.securityGroups.groupId

  • resource.instanceDetails.networkInterfaces.securityGroups.groupName

  • resource.instanceDetails.networkInterfaces.subnetId

  • resource.instanceDetails.networkInterfaces.vpcId

  • resource.instanceDetails.tags.key

  • resource.instanceDetails.tags.value

  • resource.resourceType

  • service.action.actionType

  • service.action.awsApiCallAction.api

  • service.action.awsApiCallAction.callerType

  • service.action.awsApiCallAction.errorCode

  • service.action.awsApiCallAction.userAgent

  • service.action.awsApiCallAction.remoteIpDetails.city.cityName

  • service.action.awsApiCallAction.remoteIpDetails.country.countryName

  • service.action.awsApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.awsApiCallAction.remoteIpDetails.organization.asn

  • service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg

  • service.action.awsApiCallAction.serviceName

  • service.action.dnsRequestAction.domain

  • service.action.networkConnectionAction.blocked

  • service.action.networkConnectionAction.connectionDirection

  • service.action.networkConnectionAction.localPortDetails.port

  • service.action.networkConnectionAction.protocol

  • service.action.networkConnectionAction.localIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.city.cityName

  • service.action.networkConnectionAction.remoteIpDetails.country.countryName

  • service.action.networkConnectionAction.remoteIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.organization.asn

  • service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg

  • service.action.networkConnectionAction.remotePortDetails.port

  • service.additionalInfo.threatListName

  • resource.s3BucketDetails.publicAccess.effectivePermissions

  • resource.s3BucketDetails.name

  • resource.s3BucketDetails.tags.key

  • resource.s3BucketDetails.tags.value

  • resource.s3BucketDetails.type

  • service.resourceRole

  • severity

  • type

  • updatedAt

    Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.

", + "documentation":"

Represents the criteria to be used in the filter for querying findings.

You can only use the following attributes to query findings:

  • accountId

  • id

  • region

  • severity

    To filter on the basis of severity, the API and CLI use the following input list for the FindingCriteria condition:

    • Low: [\"1\", \"2\", \"3\"]

    • Medium: [\"4\", \"5\", \"6\"]

    • High: [\"7\", \"8\", \"9\"]

    For more information, see Severity levels for GuardDuty findings.

  • type

  • updatedAt

    Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.

  • resource.accessKeyDetails.accessKeyId

  • resource.accessKeyDetails.principalId

  • resource.accessKeyDetails.userName

  • resource.accessKeyDetails.userType

  • resource.instanceDetails.iamInstanceProfile.id

  • resource.instanceDetails.imageId

  • resource.instanceDetails.instanceId

  • resource.instanceDetails.tags.key

  • resource.instanceDetails.tags.value

  • resource.instanceDetails.networkInterfaces.ipv6Addresses

  • resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress

  • resource.instanceDetails.networkInterfaces.publicDnsName

  • resource.instanceDetails.networkInterfaces.publicIp

  • resource.instanceDetails.networkInterfaces.securityGroups.groupId

  • resource.instanceDetails.networkInterfaces.securityGroups.groupName

  • resource.instanceDetails.networkInterfaces.subnetId

  • resource.instanceDetails.networkInterfaces.vpcId

  • resource.instanceDetails.outpostArn

  • resource.resourceType

  • resource.s3BucketDetails.publicAccess.effectivePermissions

  • resource.s3BucketDetails.name

  • resource.s3BucketDetails.tags.key

  • resource.s3BucketDetails.tags.value

  • resource.s3BucketDetails.type

  • service.action.actionType

  • service.action.awsApiCallAction.api

  • service.action.awsApiCallAction.callerType

  • service.action.awsApiCallAction.errorCode

  • service.action.awsApiCallAction.remoteIpDetails.city.cityName

  • service.action.awsApiCallAction.remoteIpDetails.country.countryName

  • service.action.awsApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.awsApiCallAction.remoteIpDetails.organization.asn

  • service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg

  • service.action.awsApiCallAction.serviceName

  • service.action.dnsRequestAction.domain

  • service.action.networkConnectionAction.blocked

  • service.action.networkConnectionAction.connectionDirection

  • service.action.networkConnectionAction.localPortDetails.port

  • service.action.networkConnectionAction.protocol

  • service.action.networkConnectionAction.remoteIpDetails.city.cityName

  • service.action.networkConnectionAction.remoteIpDetails.country.countryName

  • service.action.networkConnectionAction.remoteIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.organization.asn

  • service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg

  • service.action.networkConnectionAction.remotePortDetails.port

  • service.action.awsApiCallAction.remoteAccountDetails.affiliated

  • service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.kubernetesApiCallAction.requestUri

  • service.action.networkConnectionAction.localIpDetails.ipAddressV4

  • service.action.networkConnectionAction.protocol

  • service.action.awsApiCallAction.serviceName

  • service.action.awsApiCallAction.remoteAccountDetails.accountId

  • service.additionalInfo.threatListName

  • service.resourceRole

  • resource.eksClusterDetails.name

  • resource.kubernetesDetails.kubernetesWorkloadDetails.name

  • resource.kubernetesDetails.kubernetesWorkloadDetails.namespace

  • resource.kubernetesDetails.kubernetesUserDetails.username

  • resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image

  • resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix

  • service.ebsVolumeScanDetails.scanId

  • service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name

  • service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity

  • service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash

  • resource.ecsClusterDetails.name

  • resource.ecsClusterDetails.taskDetails.containers.image

  • resource.ecsClusterDetails.taskDetails.definitionArn

  • resource.containerDetails.image

  • resource.rdsDbInstanceDetails.dbInstanceIdentifier

  • resource.rdsDbInstanceDetails.dbClusterIdentifier

  • resource.rdsDbInstanceDetails.engine

  • resource.rdsDbUserDetails.user

  • resource.rdsDbInstanceDetails.tags.key

  • resource.rdsDbInstanceDetails.tags.value

  • service.runtimeDetails.process.executableSha256

  • service.runtimeDetails.process.name

  • service.runtimeDetails.process.name

  • resource.lambdaDetails.functionName

  • resource.lambdaDetails.functionArn

  • resource.lambdaDetails.tags.key

  • resource.lambdaDetails.tags.value

", "locationName":"findingCriteria" }, "ClientToken":{ @@ -5764,7 +5764,8 @@ "type":"string", "enum":[ "NEW", - "NONE" + "NONE", + "ALL" ] }, "Organization":{ diff --git a/services/health/pom.xml b/services/health/pom.xml index c9337d48fff..95ef8e2589c 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index 4e5ec8de714..1ec3aa9d834 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/healthlake/src/main/resources/codegen-resources/service-2.json b/services/healthlake/src/main/resources/codegen-resources/service-2.json index 7d769be9680..ce4e920381f 100644 --- a/services/healthlake/src/main/resources/codegen-resources/service-2.json +++ b/services/healthlake/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a Data Store that can ingest and export FHIR formatted data.

" + "documentation":"

Creates a data store that can ingest and export FHIR formatted data.

" }, "DeleteFHIRDatastore":{ "name":"DeleteFHIRDatastore", @@ -46,7 +46,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes a Data Store.

" + "documentation":"

Deletes a data store.

" }, "DescribeFHIRDatastore":{ "name":"DescribeFHIRDatastore", @@ -62,7 +62,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets the properties associated with the FHIR Data Store, including the Data Store ID, Data Store ARN, Data Store name, Data Store status, created at, Data Store type version, and Data Store endpoint.

" + "documentation":"

Gets the properties associated with the FHIR data store, including the data store ID, data store ARN, data store name, data store status, when the data store was created, data store type version, and the data store's endpoint.

" }, "DescribeFHIRExportJob":{ "name":"DescribeFHIRExportJob", @@ -109,7 +109,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists all FHIR Data Stores that are in the user’s account, regardless of Data Store status.

" + "documentation":"

Lists all FHIR data stores that are in the user’s account, regardless of data store status.

" }, "ListFHIRExportJobs":{ "name":"ListFHIRExportJobs", @@ -157,7 +157,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns a list of all existing tags associated with a Data Store.

" + "documentation":"

Returns a list of all existing tags associated with a data store.

" }, "StartFHIRExportJob":{ "name":"StartFHIRExportJob", @@ -205,7 +205,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds a user specified key and value tag to a Data Store.

" + "documentation":"

Adds a user specified key and value tag to a data store.

" }, "UntagResource":{ "name":"UntagResource", @@ -219,7 +219,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Removes tags from a Data Store.

" + "documentation":"

Removes tags from a data store.

" } }, "shapes":{ @@ -270,7 +270,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The Data Store is in a transition state and the user requested action can not be performed.

", + "documentation":"

The data store is in a transition state and the user requested action can not be performed.

", "exception":true }, "CreateFHIRDatastoreRequest":{ @@ -279,19 +279,19 @@ "members":{ "DatastoreName":{ "shape":"DatastoreName", - "documentation":"

The user generated name for the Data Store.

" + "documentation":"

The user generated name for the data store.

" }, "DatastoreTypeVersion":{ "shape":"FHIRVersion", - "documentation":"

The FHIR version of the Data Store. The only supported version is R4.

" + "documentation":"

The FHIR version of the data store. The only supported version is R4.

" }, "SseConfiguration":{ "shape":"SseConfiguration", - "documentation":"

The server-side encryption key configuration for a customer provided encryption key specified for creating a Data Store.

" + "documentation":"

The server-side encryption key configuration for a customer provided encryption key specified for creating a data store.

" }, "PreloadDataConfig":{ "shape":"PreloadDataConfig", - "documentation":"

Optional parameter to preload data upon creation of the Data Store. Currently, the only supported preloaded data is synthetic data generated from Synthea.

" + "documentation":"

Optional parameter to preload data upon creation of the data store. Currently, the only supported preloaded data is synthetic data generated from Synthea.

" }, "ClientToken":{ "shape":"ClientTokenString", @@ -300,11 +300,11 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

Resource tags that are applied to a Data Store when it is created.

" + "documentation":"

Resource tags that are applied to a data store when it is created.

" }, "IdentityProviderConfiguration":{ "shape":"IdentityProviderConfiguration", - "documentation":"

The configuration of the identity provider that you want to use for your Data Store.

" + "documentation":"

The configuration of the identity provider that you want to use for your data store.

" } } }, @@ -319,19 +319,19 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS-generated Data Store id. This id is in the output from the initial Data Store creation call.

" + "documentation":"

The AWS-generated data store id. This id is in the output from the initial data store creation call.

" }, "DatastoreArn":{ "shape":"DatastoreArn", - "documentation":"

The Data Store ARN is generated during the creation of the Data Store and can be found in the output from the initial Data Store creation call.

" + "documentation":"

The data store ARN is generated during the creation of the data store and can be found in the output from the initial data store creation call.

" }, "DatastoreStatus":{ "shape":"DatastoreStatus", - "documentation":"

The status of the FHIR Data Store. Possible statuses are ‘CREATING’, ‘ACTIVE’, ‘DELETING’, ‘DELETED’.

" + "documentation":"

The status of the FHIR data store.

" }, "DatastoreEndpoint":{ "shape":"BoundedLengthString", - "documentation":"

The AWS endpoint for the created Data Store.

" + "documentation":"

The AWS endpoint for the created data store.

" } } }, @@ -344,22 +344,22 @@ "members":{ "DatastoreName":{ "shape":"DatastoreName", - "documentation":"

Allows the user to filter Data Store results by name.

" + "documentation":"

Allows the user to filter data store results by name.

" }, "DatastoreStatus":{ "shape":"DatastoreStatus", - "documentation":"

Allows the user to filter Data Store results by status.

" + "documentation":"

Allows the user to filter data store results by status.

" }, "CreatedBefore":{ "shape":"Timestamp", - "documentation":"

A filter that allows the user to set cutoff dates for records. All Data Stores created before the specified date will be included in the results.

" + "documentation":"

A filter that allows the user to set cutoff dates for records. All data stores created before the specified date will be included in the results.

" }, "CreatedAfter":{ "shape":"Timestamp", - "documentation":"

A filter that allows the user to set cutoff dates for records. All Data Stores created after the specified date will be included in the results.

" + "documentation":"

A filter that allows the user to set cutoff dates for records. All data stores created after the specified date will be included in the results.

" } }, - "documentation":"

The filters applied to Data Store query.

" + "documentation":"

The filters applied to data store query.

" }, "DatastoreId":{ "type":"string", @@ -385,23 +385,23 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS-generated ID number for the Data Store.

" + "documentation":"

The AWS-generated ID number for the data store.

" }, "DatastoreArn":{ "shape":"DatastoreArn", - "documentation":"

The Amazon Resource Name used in the creation of the Data Store.

" + "documentation":"

The Amazon Resource Name used in the creation of the data store.

" }, "DatastoreName":{ "shape":"DatastoreName", - "documentation":"

The user-generated name for the Data Store.

" + "documentation":"

The user-generated name for the data store.

" }, "DatastoreStatus":{ "shape":"DatastoreStatus", - "documentation":"

The status of the Data Store. Possible statuses are 'CREATING', 'ACTIVE', 'DELETING', or 'DELETED'.

" + "documentation":"

The status of the data store.

" }, "CreatedAt":{ "shape":"Timestamp", - "documentation":"

The time that a Data Store was created.

" + "documentation":"

The time that a data store was created.

" }, "DatastoreTypeVersion":{ "shape":"FHIRVersion", @@ -409,7 +409,7 @@ }, "DatastoreEndpoint":{ "shape":"String", - "documentation":"

The AWS endpoint for the Data Store. Each Data Store will have it's own endpoint with Data Store ID in the endpoint URL.

" + "documentation":"

The AWS endpoint for the data store. Each data store will have it's own endpoint with data store ID in the endpoint URL.

" }, "SseConfiguration":{ "shape":"SseConfiguration", @@ -417,14 +417,14 @@ }, "PreloadDataConfig":{ "shape":"PreloadDataConfig", - "documentation":"

The preloaded data configuration for the Data Store. Only data preloaded from Synthea is supported.

" + "documentation":"

The preloaded data configuration for the data store. Only data preloaded from Synthea is supported.

" }, "IdentityProviderConfiguration":{ "shape":"IdentityProviderConfiguration", - "documentation":"

The identity provider that you selected when you created the Data Store.

" + "documentation":"

The identity provider that you selected when you created the data store.

" } }, - "documentation":"

Displays the properties of the Data Store, including the ID, ARN, name, and the status of the Data Store.

" + "documentation":"

Displays the properties of the data store, including the ID, ARN, name, and the status of the data store.

" }, "DatastorePropertiesList":{ "type":"list", @@ -445,7 +445,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS-generated ID for the Data Store to be deleted.

" + "documentation":"

The AWS-generated ID for the data store to be deleted.

" } } }, @@ -460,19 +460,19 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS-generated ID for the Data Store to be deleted.

" + "documentation":"

The AWS-generated ID for the data store to be deleted.

" }, "DatastoreArn":{ "shape":"DatastoreArn", - "documentation":"

The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission.

" + "documentation":"

The Amazon Resource Name (ARN) that gives AWS HealthLake access permission.

" }, "DatastoreStatus":{ "shape":"DatastoreStatus", - "documentation":"

The status of the Data Store that the user has requested to be deleted.

" + "documentation":"

The status of the data store that the user has requested to be deleted.

" }, "DatastoreEndpoint":{ "shape":"BoundedLengthString", - "documentation":"

The AWS endpoint for the Data Store the user has requested to be deleted.

" + "documentation":"

The AWS endpoint for the data store the user has requested to be deleted.

" } } }, @@ -482,7 +482,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS-generated Data Store ID.

" + "documentation":"

The AWS-generated data store ID.

" } } }, @@ -492,7 +492,7 @@ "members":{ "DatastoreProperties":{ "shape":"DatastoreProperties", - "documentation":"

All properties associated with a Data Store, including the Data Store ID, Data Store ARN, Data Store name, Data Store status, created at, Data Store type version, and Data Store endpoint.

" + "documentation":"

All properties associated with a data store, including the data store ID, data store ARN, data store name, data store status, when the data store was created, data store type version, and the data store's endpoint.

" } } }, @@ -505,7 +505,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS generated ID for the Data Store from which files are being exported from for an export job.

" + "documentation":"

The AWS generated ID for the data store from which files are being exported from for an export job.

" }, "JobId":{ "shape":"JobId", @@ -532,7 +532,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS-generated ID of the Data Store.

" + "documentation":"

The AWS-generated ID of the data store.

" }, "JobId":{ "shape":"JobId", @@ -588,7 +588,7 @@ }, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS generated ID for the Data Store from which files are being exported for an export job.

" + "documentation":"

The AWS generated ID for the data store from which files are being exported for an export job.

" }, "OutputDataConfig":{ "shape":"OutputDataConfig", @@ -625,11 +625,11 @@ "members":{ "AuthorizationStrategy":{ "shape":"AuthorizationStrategy", - "documentation":"

The authorization strategy that you selected when you created the Data Store.

" + "documentation":"

The authorization strategy that you selected when you created the data store.

" }, "FineGrainedAuthorizationEnabled":{ "shape":"Boolean", - "documentation":"

If you enabled fine-grained authorization when you created the Data Store.

" + "documentation":"

If you enabled fine-grained authorization when you created the data store.

" }, "Metadata":{ "shape":"ConfigurationMetadata", @@ -640,7 +640,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the Lambda function that you want to use to decode the access token created by the authorization server.

" } }, - "documentation":"

The identity provider configuration that you gave when the Data Store was created.

" + "documentation":"

The identity provider configuration that you gave when the data store was created.

" }, "ImportJobProperties":{ "type":"structure", @@ -683,14 +683,14 @@ "JobOutputDataConfig":{"shape":"OutputDataConfig"}, "DataAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) that gives Amazon HealthLake access to your input data.

" + "documentation":"

The Amazon Resource Name (ARN) that gives AWS HealthLake access to your input data.

" }, "Message":{ "shape":"Message", "documentation":"

An explanation of any errors that may have occurred during the FHIR import job.

" } }, - "documentation":"

Displays the properties of the import job, including the ID, Arn, Name, and the status of the Data Store.

" + "documentation":"

Displays the properties of the import job, including the ID, Arn, Name, and the status of the data store.

" }, "ImportJobPropertiesList":{ "type":"list", @@ -701,7 +701,7 @@ "members":{ "S3Uri":{ "shape":"S3Uri", - "documentation":"

The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.

" + "documentation":"

The S3Uri is the user specified S3 location of the FHIR data to be imported into AWS HealthLake.

" } }, "documentation":"

The input properties for an import job.

", @@ -748,14 +748,14 @@ "members":{ "CmkType":{ "shape":"CmkType", - "documentation":"

The type of customer-managed-key(CMK) used for encyrption. The two types of supported CMKs are customer owned CMKs and AWS owned CMKs.

" + "documentation":"

The type of customer-managed-key(CMK) used for encryption. The two types of supported CMKs are customer owned CMKs and AWS owned CMKs.

" }, "KmsKeyId":{ "shape":"EncryptionKeyID", - "documentation":"

The KMS encryption key id/alias used to encrypt the Data Store contents at rest.

" + "documentation":"

The KMS encryption key id/alias used to encrypt the data store contents at rest.

" } }, - "documentation":"

The customer-managed-key(CMK) used when creating a Data Store. If a customer owned key is not specified, an AWS owned key will be used for encryption.

" + "documentation":"

The customer-managed key (CMK) used when creating a data store. If a customer owned key is not specified, an AWS owned key will be used for encryption.

" }, "LambdaArn":{ "type":"string", @@ -768,15 +768,15 @@ "members":{ "Filter":{ "shape":"DatastoreFilter", - "documentation":"

Lists all filters associated with a FHIR Data Store request.

" + "documentation":"

Lists all filters associated with a FHIR data store request.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

Fetches the next page of Data Stores when results are paginated.

" + "documentation":"

Fetches the next page of data stores when results are paginated.

" }, "MaxResults":{ "shape":"MaxResultsInteger", - "documentation":"

The maximum number of Data Stores returned in a single page of a ListFHIRDatastoresRequest call.

" + "documentation":"

The maximum number of data stores returned in a single page of a ListFHIRDatastoresRequest call.

" } } }, @@ -786,7 +786,7 @@ "members":{ "DatastorePropertiesList":{ "shape":"DatastorePropertiesList", - "documentation":"

All properties associated with the listed Data Stores.

" + "documentation":"

All properties associated with the listed data stores.

" }, "NextToken":{ "shape":"NextToken", @@ -800,7 +800,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

This parameter limits the response to the export job with the specified Data Store ID.

" + "documentation":"

This parameter limits the response to the export job with the specified data store ID.

" }, "NextToken":{ "shape":"NextToken", @@ -848,7 +848,7 @@ "members":{ "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

This parameter limits the response to the import job with the specified Data Store ID.

" + "documentation":"

This parameter limits the response to the import job with the specified data store ID.

" }, "NextToken":{ "shape":"NextToken", @@ -896,7 +896,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name(ARN) of the Data Store for which tags are being added.

" + "documentation":"

The Amazon Resource Name (ARN) of the data store for which tags are being added.

" } } }, @@ -905,7 +905,7 @@ "members":{ "Tags":{ "shape":"TagList", - "documentation":"

Returns a list of tags associated with a Data Store.

" + "documentation":"

Returns a list of tags associated with a data store.

" } } }, @@ -945,7 +945,7 @@ "documentation":"

The type of preloaded data. Only Synthea preloaded data is supported.

" } }, - "documentation":"

The input properties for the preloaded Data Store. Only data preloaded from Synthea is supported.

" + "documentation":"

The input properties for the preloaded data store. Only data preloaded from Synthea is supported.

" }, "PreloadDataType":{ "type":"string", @@ -956,7 +956,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"

The requested Data Store was not found.

", + "documentation":"

The requested data store was not found.

", "exception":true }, "S3Configuration":{ @@ -968,7 +968,7 @@ "members":{ "S3Uri":{ "shape":"S3Uri", - "documentation":"

The S3Uri is the user specified S3 location of the FHIR data to be imported into Amazon HealthLake.

" + "documentation":"

The S3Uri is the user specified S3 location of the FHIR data to be imported into AWS HealthLake.

" }, "KmsKeyId":{ "shape":"EncryptionKeyID", @@ -1012,7 +1012,7 @@ }, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS generated ID for the Data Store from which files are being exported for an export job.

" + "documentation":"

The AWS generated ID for the data store from which files are being exported for an export job.

" }, "DataAccessRoleArn":{ "shape":"IamRoleArn", @@ -1042,7 +1042,7 @@ }, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS generated ID for the Data Store from which files are being exported for an export job.

" + "documentation":"

The AWS generated ID for the data store from which files are being exported for an export job.

" } } }, @@ -1067,11 +1067,11 @@ "JobOutputDataConfig":{"shape":"OutputDataConfig"}, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS-generated Data Store ID.

" + "documentation":"

The AWS-generated data store ID.

" }, "DataAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) that gives Amazon HealthLake access permission.

" + "documentation":"

The Amazon Resource Name (ARN) that gives AWS HealthLake access permission.

" }, "ClientToken":{ "shape":"ClientTokenString", @@ -1097,7 +1097,7 @@ }, "DatastoreId":{ "shape":"DatastoreId", - "documentation":"

The AWS-generated Data Store ID.

" + "documentation":"

The AWS-generated data store ID.

" } } }, @@ -1151,11 +1151,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name(ARN)that gives Amazon HealthLake access to the Data Store which tags are being added to.

" + "documentation":"

The Amazon Resource Name (ARN) that gives AWS HealthLake access to the data store which tags are being added to.

" }, "Tags":{ "shape":"TagList", - "documentation":"

The user specified key and value pair tags being added to a Data Store.

" + "documentation":"

The user specified key and value pair tags being added to a data store.

" } } }, @@ -1188,11 +1188,11 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

\"The Amazon Resource Name(ARN) of the Data Store for which tags are being removed

" + "documentation":"

The Amazon Resource Name (ARN) of the data store for which tags are being removed.

" }, "TagKeys":{ "shape":"TagKeyList", - "documentation":"

The keys for the tags to be removed from the Healthlake Data Store.

" + "documentation":"

The keys for the tags to be removed from the HealthLake data store.

" } } }, @@ -1210,5 +1210,5 @@ "exception":true } }, - "documentation":"

Amazon HealthLake is a HIPAA eligibile service that allows customers to store, transform, query, and analyze their FHIR-formatted data in a consistent fashion in the cloud.

" + "documentation":"

AWS HealthLake is a HIPAA eligible service that allows customers to store, transform, query, and analyze their FHIR-formatted data in a consistent fashion in the cloud.

" } diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml index 76287d6a63e..770e789c2e2 100644 --- a/services/honeycode/pom.xml +++ b/services/honeycode/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT honeycode AWS Java SDK :: Services :: Honeycode diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 34b8328bd17..45cee3318ce 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index dc1954b356d..0d21860fc98 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index 36dad92f6d5..b42888fbf85 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 608e04ecb67..8c6b51f1701 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/inspector2/pom.xml b/services/inspector2/pom.xml index 3fdf104d532..237e7b59e57 100644 --- a/services/inspector2/pom.xml +++ b/services/inspector2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT inspector2 AWS Java SDK :: Services :: Inspector2 diff --git a/services/inspector2/src/main/resources/codegen-resources/endpoint-rule-set.json 
b/services/inspector2/src/main/resources/codegen-resources/endpoint-rule-set.json index 303effe98ea..a8d6cdbbc6f 100644 --- a/services/inspector2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/inspector2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": 
"tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://inspector2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://inspector2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://inspector2-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - 
}, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://inspector2-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://inspector2.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://inspector2.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://inspector2.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://inspector2.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - 
}, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/inspector2/src/main/resources/codegen-resources/service-2.json b/services/inspector2/src/main/resources/codegen-resources/service-2.json index 26d23ea7d67..357ec7f71f4 100644 --- a/services/inspector2/src/main/resources/codegen-resources/service-2.json +++ b/services/inspector2/src/main/resources/codegen-resources/service-2.json @@ -65,6 +65,23 @@ ], "documentation":"

Retrieves code snippets from findings that Amazon Inspector detected code vulnerabilities in.

" }, + "BatchGetFindingDetails":{ + "name":"BatchGetFindingDetails", + "http":{ + "method":"POST", + "requestUri":"/findings/details/batch/get", + "responseCode":200 + }, + "input":{"shape":"BatchGetFindingDetailsRequest"}, + "output":{"shape":"BatchGetFindingDetailsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Gets vulnerability details for findings.

" + }, "BatchGetFreeTrialInfo":{ "name":"BatchGetFreeTrialInfo", "http":{ @@ -1492,6 +1509,29 @@ } } }, + "BatchGetFindingDetailsRequest":{ + "type":"structure", + "required":["findingArns"], + "members":{ + "findingArns":{ + "shape":"FindingArnList", + "documentation":"

A list of finding ARNs.

" + } + } + }, + "BatchGetFindingDetailsResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"FindingDetailsErrorList", + "documentation":"

Error information for findings that details could not be returned for.

" + }, + "findingDetails":{ + "shape":"FindingDetails", + "documentation":"

A finding's vulnerability details.

" + } + } + }, "BatchGetFreeTrialInfoRequest":{ "type":"structure", "required":["accountIds"], @@ -2416,7 +2456,7 @@ }, "keyPrefix":{ "shape":"String", - "documentation":"

The prefix of the Amazon S3 bucket used to export findings.

" + "documentation":"

The prefix that the findings will be written under.

" }, "kmsKeyArn":{ "shape":"String", @@ -2820,6 +2860,40 @@ ] }, "ErrorMessage":{"type":"string"}, + "Evidence":{ + "type":"structure", + "members":{ + "evidenceDetail":{ + "shape":"EvidenceDetail", + "documentation":"

The evidence details.

" + }, + "evidenceRule":{ + "shape":"EvidenceRule", + "documentation":"

The evidence rule.

" + }, + "severity":{ + "shape":"EvidenceSeverity", + "documentation":"

The evidence severity.

" + } + }, + "documentation":"

Details of the evidence for a vulnerability identified in a finding.

" + }, + "EvidenceDetail":{ + "type":"string", + "min":0 + }, + "EvidenceList":{ + "type":"list", + "member":{"shape":"Evidence"} + }, + "EvidenceRule":{ + "type":"string", + "min":0 + }, + "EvidenceSeverity":{ + "type":"string", + "min":0 + }, "ExecutionRoleArn":{ "type":"string", "pattern":"^arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" @@ -3123,7 +3197,7 @@ }, "networkProtocol":{ "shape":"StringFilterList", - "documentation":"

Details on the ingress source addresses used to filter findings.

" + "documentation":"

Details on network protocol used to filter findings.

" }, "portRange":{ "shape":"PortRangeFilterList", @@ -3303,11 +3377,98 @@ "min":1, "pattern":"^arn:(aws[a-zA-Z-]*)?:inspector2:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:finding/[a-f0-9]{32}$" }, + "FindingArnList":{ + "type":"list", + "member":{"shape":"FindingArn"}, + "max":10, + "min":1 + }, "FindingDescription":{ "type":"string", "max":1024, "min":1 }, + "FindingDetail":{ + "type":"structure", + "members":{ + "cisaData":{"shape":"CisaData"}, + "cwes":{ + "shape":"Cwes", + "documentation":"

The Common Weakness Enumerations (CWEs) associated with the vulnerability.

" + }, + "epssScore":{ + "shape":"Double", + "documentation":"

The Exploit Prediction Scoring System (EPSS) score of the vulnerability.

" + }, + "evidences":{ + "shape":"EvidenceList", + "documentation":"

Information on the evidence of the vulnerability.

" + }, + "exploitObserved":{"shape":"ExploitObserved"}, + "findingArn":{ + "shape":"FindingArn", + "documentation":"

The finding ARN that the vulnerability details are associated with.

" + }, + "referenceUrls":{ + "shape":"VulnerabilityReferenceUrls", + "documentation":"

The reference URLs for the vulnerability data.

" + }, + "riskScore":{ + "shape":"RiskScore", + "documentation":"

The risk score of the vulnerability.

" + }, + "tools":{ + "shape":"Tools", + "documentation":"

The known malware tools or kits that can exploit the vulnerability.

" + }, + "ttps":{ + "shape":"Ttps", + "documentation":"

The MITRE adversary tactics, techniques, or procedures (TTPs) associated with the vulnerability.

" + } + }, + "documentation":"

Details of the vulnerability identified in a finding.

" + }, + "FindingDetails":{ + "type":"list", + "member":{"shape":"FindingDetail"}, + "min":0 + }, + "FindingDetailsError":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "findingArn" + ], + "members":{ + "errorCode":{ + "shape":"FindingDetailsErrorCode", + "documentation":"

The error code.

" + }, + "errorMessage":{ + "shape":"NonEmptyString", + "documentation":"

The error message.

" + }, + "findingArn":{ + "shape":"FindingArn", + "documentation":"

The finding ARN that returned an error.

" + } + }, + "documentation":"

Details about an error encountered when trying to return vulnerability data for a finding.

" + }, + "FindingDetailsErrorCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "ACCESS_DENIED", + "FINDING_DETAILS_NOT_FOUND", + "INVALID_INPUT" + ] + }, + "FindingDetailsErrorList":{ + "type":"list", + "member":{"shape":"FindingDetailsError"} + }, "FindingList":{ "type":"list", "member":{"shape":"Finding"}, @@ -5283,6 +5444,10 @@ "AWS_LAMBDA_FUNCTION" ] }, + "RiskScore":{ + "type":"integer", + "box":true + }, "Runtime":{ "type":"string", "enum":[ @@ -5812,6 +5977,14 @@ "ALL" ] }, + "Tool":{ + "type":"string", + "min":0 + }, + "Tools":{ + "type":"list", + "member":{"shape":"Tool"} + }, "Ttp":{ "type":"string", "max":30, diff --git a/services/internetmonitor/pom.xml b/services/internetmonitor/pom.xml index 9664cf5a58b..83fe357ad22 100644 --- a/services/internetmonitor/pom.xml +++ b/services/internetmonitor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT internetmonitor AWS Java SDK :: Services :: Internet Monitor diff --git a/services/internetmonitor/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/internetmonitor/src/main/resources/codegen-resources/endpoint-rule-set.json index 25b3af24f3a..3de3fac3112 100644 --- a/services/internetmonitor/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/internetmonitor/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -64,18 +64,28 @@ ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -83,19 +93,35 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "Region" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", "rules": 
[ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], "type": "tree", "rules": [ { @@ -110,7 +136,7 @@ { "ref": "PartitionResult" }, - "supportsDualStack" + "supportsFIPS" ] } ] @@ -118,62 +144,10 @@ ], "type": "tree", "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://internetmonitor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, { "conditions": [], "endpoint": { - "url": "https://internetmonitor.{Region}.{PartitionResult#dualStackDnsSuffix}", + "url": "https://internetmonitor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, @@ -183,95 +157,91 @@ }, { "conditions": [], - "type": "tree", - "rules": [ + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://internetmonitor.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "fn": "getAttr", + "argv": [ { - 
"conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://internetmonitor-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "ref": "PartitionResult" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://internetmonitor.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "supportsFIPS" ] } ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://internetmonitor-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://internetmonitor.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/internetmonitor/src/main/resources/codegen-resources/service-2.json b/services/internetmonitor/src/main/resources/codegen-resources/service-2.json index e1cdcd204a5..ae9f3da0679 100644 --- a/services/internetmonitor/src/main/resources/codegen-resources/service-2.json +++ 
b/services/internetmonitor/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"LimitExceededException"}, {"shape":"ValidationException"} ], - "documentation":"

Creates a monitor in Amazon CloudWatch Internet Monitor. A monitor is built based on information from the application resources that you add: Amazon Virtual Private Clouds (VPCs), Amazon CloudFront distributions, and WorkSpaces directories. Internet Monitor then publishes internet measurements from Amazon Web Services that are specific to the city-networks, that is, the locations and ASNs (typically internet service providers or ISPs), where clients access your application. For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide.

When you create a monitor, you set a maximum limit for the number of city-networks where client traffic is monitored. The city-network maximum that you choose is the limit, but you only pay for the number of city-networks that are actually monitored. You can change the maximum at any time by updating your monitor. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide.

", + "documentation":"

Creates a monitor in Amazon CloudWatch Internet Monitor. A monitor is built based on information from the application resources that you add: VPCs, Network Load Balancers (NLBs), Amazon CloudFront distributions, and Amazon WorkSpaces directories. Internet Monitor then publishes internet measurements from Amazon Web Services that are specific to the city-networks. That is, the locations and ASNs (typically internet service providers or ISPs), where clients access your application. For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide.

When you create a monitor, you choose the percentage of traffic that you want to monitor. You can also set a maximum limit for the number of city-networks where client traffic is monitored, that caps the total traffic that Internet Monitor monitors. A city-network maximum is the limit of city-networks, but you only pay for the number of city-networks that are actually monitored. You can update your monitor at any time to change the percentage of traffic to monitor or the city-networks maximum. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide.

", "idempotent":true }, "DeleteMonitor":{ @@ -65,7 +65,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets information the Amazon CloudWatch Internet Monitor has created and stored about a health event for a specified monitor. This information includes the impacted locations, and all of the information related to the event by location.

The information returned includes the performance, availability, and round-trip time impact, information about the network providers, the event type, and so on.

Information rolled up at the global traffic level is also returned, including the impact type and total traffic impact.

" + "documentation":"

Gets information the Amazon CloudWatch Internet Monitor has created and stored about a health event for a specified monitor. This information includes the impacted locations, and all the information related to the event, by location.

The information returned includes the impact on performance, availability, and round-trip time, information about the network providers (ASNs), the event type, and so on.

Information rolled up at the global traffic level is also returned, including the impact type and total traffic impact.

" }, "GetMonitor":{ "name":"GetMonitor", @@ -99,7 +99,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists all health events for a monitor in Amazon CloudWatch Internet Monitor. Returns all information for health events including the client location information the network cause and status, event start and end time, percentage of total traffic impacted, and status.

Health events that have start times during the time frame that is requested are not included in the list of health events.

" + "documentation":"

Lists all health events for a monitor in Amazon CloudWatch Internet Monitor. Returns information for health events including the event start and end time and the status.

Health events that have start times during the time frame that is requested are not included in the list of health events.

" }, "ListMonitors":{ "name":"ListMonitors", @@ -191,7 +191,7 @@ {"shape":"LimitExceededException"}, {"shape":"ValidationException"} ], - "documentation":"

Updates a monitor. You can update a monitor to change the maximum number of city-networks (locations and ASNs or internet service providers), to add or remove resources, or to change the status of the monitor. Note that you can't change the name of a monitor.

The city-network maximum that you choose is the limit, but you only pay for the number of city-networks that are actually monitored. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide.

", + "documentation":"

Updates a monitor. You can update a monitor to change the percentage of traffic to monitor or the maximum number of city-networks (locations and ASNs), to add or remove resources, or to change the status of the monitor. Note that you can't change the name of a monitor.

The city-network maximum that you choose is the limit, but you only pay for the number of city-networks that are actually monitored. For more information, see Choosing a city-network maximum value in the Amazon CloudWatch User Guide.

", "idempotent":true } }, @@ -223,14 +223,14 @@ }, "PercentOfTotalTrafficImpacted":{ "shape":"Double", - "documentation":"

The percentage of impact caused by a health event for total traffic globally.

For information about how Internet Monitor calculates impact, see Inside Internet Monitor in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.

" + "documentation":"

The impact on total traffic that a health event has, in increased latency or reduced availability. This is the percentage of how much latency has increased or availability has decreased during the event, compared to what is typical for traffic from this client location to the Amazon Web Services location using this client network.

For information about how Internet Monitor calculates impact, see How Internet Monitor works in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.

" }, "PercentOfClientLocationImpacted":{ "shape":"Double", "documentation":"

The percentage of impact caused by a health event for client location traffic globally.

For information about how Internet Monitor calculates impact, see Inside Internet Monitor in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.

" } }, - "documentation":"

Measurements about the availability for your application on the internet, calculated by Amazon CloudWatch Internet Monitor. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.

Availability in Internet Monitor represents the estimated percentage of traffic that is not seeing an availability drop. For example, an availability score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing an availability drop for that pair.

For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.

" + "documentation":"

Amazon CloudWatch Internet Monitor calculates measurements about the availability for your application's internet traffic between client locations and Amazon Web Services. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.

Availability in Internet Monitor represents the estimated percentage of traffic that is not seeing an availability drop. For example, an availability score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing an availability drop for that pair.

For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the Amazon CloudWatch User Guide.

" }, "BadRequestException":{ "type":"structure", @@ -266,7 +266,7 @@ }, "Resources":{ "shape":"SetOfARNs", - "documentation":"

The resources to include in a monitor, which you provide as a set of Amazon Resource Names (ARNs).

You can add a combination of Amazon Virtual Private Clouds (VPCs) and Amazon CloudFront distributions, or you can add Amazon WorkSpaces directories. You can't add all three types of resources.

If you add only VPC resources, at least one VPC must have an Internet Gateway attached to it, to make sure that it has internet connectivity.

" + "documentation":"

The resources to include in a monitor, which you provide as a set of Amazon Resource Names (ARNs). Resources can be VPCs, NLBs, Amazon CloudFront distributions, or Amazon WorkSpaces directories.

You can add a combination of VPCs and CloudFront distributions, or you can add WorkSpaces directories, or you can add NLBs. You can't add NLBs or WorkSpaces directories together with any other resources.

If you add only Amazon VPC resources, at least one VPC must have an Internet Gateway attached to it, to make sure that it has internet connectivity.

" }, "ClientToken":{ "shape":"String", @@ -279,7 +279,7 @@ }, "MaxCityNetworksToMonitor":{ "shape":"MaxCityNetworksToMonitor", - "documentation":"

The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network or ASN, such as an internet service provider (ISP), that clients access the resources through. This limit helps control billing costs.

To learn more, see Choosing a city-network maximum value in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" + "documentation":"

The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the ASN or network provider, such as an internet service provider (ISP), that clients access the resources through. Setting this limit can help control billing costs.

To learn more, see Choosing a city-network maximum value in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" }, "InternetMeasurementsLogDelivery":{ "shape":"InternetMeasurementsLogDelivery", @@ -287,11 +287,11 @@ }, "TrafficPercentageToMonitor":{ "shape":"TrafficPercentageToMonitor", - "documentation":"

The percentage of the internet-facing traffic for your application that you want to monitor with this monitor.

" + "documentation":"

The percentage of the internet-facing traffic for your application that you want to monitor with this monitor. If you set a city-networks maximum, that limit overrides the traffic percentage that you set.

To learn more, see Choosing an application traffic percentage to monitor in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" }, "HealthEventsConfig":{ "shape":"HealthEventsConfig", - "documentation":"

Defines the health event threshold percentages, for performance score and availability score. Internet Monitor creates a health event when there's an internet issue that affects your application end users where a health score percentage is at or below a set threshold. If you don't set a health event threshold, the default calue is 95%.

" + "documentation":"

Defines the threshold percentages and other configuration information for when Amazon CloudWatch Internet Monitor creates a health event. Internet Monitor creates a health event when an internet issue that affects your application end users has a health score percentage that is at or below a specific threshold, and, sometimes, when other criteria are met.

If you don't set a health event threshold, the default value is 95%.

For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.

" } } }, @@ -400,7 +400,7 @@ }, "PercentOfTotalTrafficImpacted":{ "shape":"Double", - "documentation":"

The impact on total traffic that a health event has.

" + "documentation":"

The impact on total traffic that a health event has, in increased latency or reduced availability. This is the percentage of how much latency has increased or availability has decreased during the event, compared to what is typical for traffic from this client location to the Amazon Web Services location using this client network.

" }, "ImpactType":{ "shape":"HealthEventImpactType", @@ -408,7 +408,7 @@ }, "HealthScoreThreshold":{ "shape":"Percentage", - "documentation":"

The threshold percentage for health events when Amazon CloudWatch Internet Monitor creates a health event.

" + "documentation":"

The threshold percentage for a health score that determines, along with other configuration information, when Internet Monitor creates a health event when there's an internet issue that affects your application end users.

" } } }, @@ -445,7 +445,7 @@ }, "Resources":{ "shape":"SetOfARNs", - "documentation":"

The resources that have been added for the monitor. Resources are listed by their Amazon Resource Names (ARNs).

" + "documentation":"

The resources monitored by the monitor. Resources are listed by their Amazon Resource Names (ARNs).

" }, "Status":{ "shape":"MonitorConfigState", @@ -473,7 +473,7 @@ }, "MaxCityNetworksToMonitor":{ "shape":"MaxCityNetworksToMonitor", - "documentation":"

The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network or ASN, such as an internet service provider (ISP), that clients access the resources through. This limit helps control billing costs.

To learn more, see Choosing a city-network maximum value in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" + "documentation":"

The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the ASN or network provider, such as an internet service provider (ISP), that clients access the resources through. This limit can help control billing costs.

To learn more, see Choosing a city-network maximum value in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" }, "InternetMeasurementsLogDelivery":{ "shape":"InternetMeasurementsLogDelivery", @@ -481,11 +481,11 @@ }, "TrafficPercentageToMonitor":{ "shape":"TrafficPercentageToMonitor", - "documentation":"

The percentage of the internet-facing traffic for your application that you want to monitor with this monitor.

" + "documentation":"

The percentage of the internet-facing traffic for your application to monitor with this monitor. If you set a city-networks maximum, that limit overrides the traffic percentage that you set.

To learn more, see Choosing an application traffic percentage to monitor in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" }, "HealthEventsConfig":{ "shape":"HealthEventsConfig", - "documentation":"

The list of health event thresholds. A health event threshold percentage, for performance and availability, determines the level of impact at which Amazon CloudWatch Internet Monitor creates a health event when there's an internet issue that affects your application end users.

" + "documentation":"

The list of health event threshold configurations. The threshold percentage for a health score determines, along with other configuration information, when Internet Monitor creates a health event when there's an internet issue that affects your application end users.

For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.

" } } }, @@ -535,7 +535,7 @@ }, "PercentOfTotalTrafficImpacted":{ "shape":"Double", - "documentation":"

The impact on global traffic monitored by this monitor for this health event.

" + "documentation":"

The impact on total traffic that a health event has, in increased latency or reduced availability. This is the percentage of how much latency has increased or availability has decreased during the event, compared to what is typical for traffic from this client location to the Amazon Web Services location using this client network.

" }, "ImpactType":{ "shape":"HealthEventImpactType", @@ -552,7 +552,9 @@ "type":"string", "enum":[ "AVAILABILITY", - "PERFORMANCE" + "PERFORMANCE", + "LOCAL_AVAILABILITY", + "LOCAL_PERFORMANCE" ] }, "HealthEventList":{ @@ -582,9 +584,17 @@ "PerformanceScoreThreshold":{ "shape":"Percentage", "documentation":"

The health event threshold percentage set for performance scores.

" + }, + "AvailabilityLocalHealthEventsConfig":{ + "shape":"LocalHealthEventsConfig", + "documentation":"

The configuration that determines the threshold and other conditions for when Internet Monitor creates a health event for a local availability issue.

" + }, + "PerformanceLocalHealthEventsConfig":{ + "shape":"LocalHealthEventsConfig", + "documentation":"

The configuration that determines the threshold and other conditions for when Internet Monitor creates a health event for a local performance issue.

" } }, - "documentation":"

A complex type for the configuration. Defines the health event threshold percentages, for performance score and availability score. Amazon CloudWatch Internet Monitor creates a health event when there's an internet issue that affects your application end users where a health score percentage is at or below a set threshold. If you don't set a health event threshold, the default value is 95%.

" + "documentation":"

A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for an overall performance or availability issue, across an application's geographies.

Defines the percentages, for overall performance scores and availability scores for an application, that are the thresholds for when Amazon CloudWatch Internet Monitor creates a health event. You can override the defaults to set a custom threshold for overall performance or availability scores, or both.

You can also set thresholds for local health scores, where Internet Monitor creates a health event when scores cross a threshold for one or more city-networks, in addition to creating an event when an overall score crosses a threshold.

If you don't set a health event threshold, the default value is 95%.

For local thresholds, you also set a minimum percentage of overall traffic that is impacted by an issue before Internet Monitor creates an event. In addition, you can disable local thresholds, for performance scores, availability scores, or both.

For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.

" }, "ImpactedLocation":{ "type":"structure", @@ -692,7 +702,7 @@ "documentation":"

Performance in Internet Monitor represents the estimated percentage of traffic that is not seeing a performance drop. For example, a performance score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing a performance drop for that pair.

For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" } }, - "documentation":"

Internet health includes measurements calculated by Amazon CloudWatch Internet Monitor about the performance and availability for your application on the internet. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.

" + "documentation":"

Internet health includes measurements calculated by Amazon CloudWatch Internet Monitor about the performance and availability for your application on the internet. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, Internet Monitor reports the information to you in the form of health scores: a performance score and an availability score.

" }, "InternetMeasurementsLogDelivery":{ "type":"structure", @@ -830,6 +840,31 @@ } } }, + "LocalHealthEventsConfig":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"LocalHealthEventsConfigStatus", + "documentation":"

The status of whether Internet Monitor creates a health event based on a threshold percentage set for a local health score. The status can be ENABLED or DISABLED.

" + }, + "HealthScoreThreshold":{ + "shape":"Percentage", + "documentation":"

The health event threshold percentage set for a local health score.

" + }, + "MinTrafficImpact":{ + "shape":"Percentage", + "documentation":"

The minimum percentage of overall traffic for an application that must be impacted by an issue before Internet Monitor creates an event when a threshold is crossed for a local health score.

" + } + }, + "documentation":"

A complex type with the configuration information that determines the threshold and other conditions for when Internet Monitor creates a health event for a local performance or availability issue, when scores cross a threshold for one or more city-networks.

Defines the percentages, for performance scores or availability scores, that are the local thresholds for when Amazon CloudWatch Internet Monitor creates a health event. Also defines whether a local threshold is enabled or disabled, and the minimum percentage of overall traffic that must be impacted by an issue before Internet Monitor creates an event when a threshold is crossed for a local health score.

For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.

" + }, + "LocalHealthEventsConfigStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "LogDeliveryStatus":{ "type":"string", "enum":[ @@ -979,7 +1014,7 @@ }, "PercentOfTotalTrafficImpacted":{ "shape":"Double", - "documentation":"

How much performance impact was caused by a health event for total traffic globally. For performance, this is the percentage of how much latency increased during the event compared to typical performance for your application traffic globally.

For more information, see When Amazon Web Services creates and resolves health events in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" + "documentation":"

The impact on total traffic that a health event has, in increased latency or reduced availability. This is the percentage of how much latency has increased or availability has decreased during the event, compared to what is typical for traffic from this client location to the Amazon Web Services location using this client network.

For more information, see When Amazon Web Services creates and resolves health events in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" }, "PercentOfClientLocationImpacted":{ "shape":"Double", @@ -990,7 +1025,7 @@ "documentation":"

This is the percentage of how much round-trip time increased during the event compared to typical round-trip time for your application for traffic.

For more information, see When Amazon Web Services creates and resolves health events in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" } }, - "documentation":"

Measurements about the performance for your application on the internet calculated by Amazon CloudWatch Internet Monitor. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.

Performance in Internet Monitor represents the estimated percentage of traffic that is not seeing a performance drop. For example, a performance score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing a performance drop for that pair.

For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" + "documentation":"

Amazon CloudWatch Internet Monitor calculates measurements about the performance for your application's internet traffic between client locations and Amazon Web Services. Amazon Web Services has substantial historical data about internet performance and availability between Amazon Web Services services and different network providers and geographies. By applying statistical analysis to the data, Internet Monitor can detect when the performance and availability for your application has dropped, compared to an estimated baseline that's already calculated. To make it easier to see those drops, we report that information to you in the form of health scores: a performance score and an availability score.

Performance in Internet Monitor represents the estimated percentage of traffic that is not seeing a performance drop. For example, a performance score of 99% for an end user and service location pair is equivalent to 1% of the traffic experiencing a performance drop for that pair.

For more information, see How Internet Monitor calculates performance and availability scores in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" }, "ResourceName":{ "type":"string", @@ -1182,7 +1217,7 @@ }, "ResourcesToAdd":{ "shape":"SetOfARNs", - "documentation":"

The resources to include in a monitor, which you provide as a set of Amazon Resource Names (ARNs).

You can add a combination of Amazon Virtual Private Clouds (VPCs) and Amazon CloudFront distributions, or you can add Amazon WorkSpaces directories. You can't add all three types of resources.

If you add only VPC resources, at least one VPC must have an Internet Gateway attached to it, to make sure that it has internet connectivity.

" + "documentation":"

The resources to include in a monitor, which you provide as a set of Amazon Resource Names (ARNs). Resources can be VPCs, NLBs, Amazon CloudFront distributions, or Amazon WorkSpaces directories.

You can add a combination of VPCs and CloudFront distributions, or you can add WorkSpaces directories, or you can add NLBs. You can't add NLBs or WorkSpaces directories together with any other resources.

If you add only Amazon Virtual Private Clouds resources, at least one VPC must have an Internet Gateway attached to it, to make sure that it has internet connectivity.

" }, "ResourcesToRemove":{ "shape":"SetOfARNs", @@ -1199,7 +1234,7 @@ }, "MaxCityNetworksToMonitor":{ "shape":"MaxCityNetworksToMonitor", - "documentation":"

The maximum number of city-networks to monitor for your resources. A city-network is the location (city) where clients access your application resources from and the network or ASN, such as an internet service provider, that clients access the resources through.

" + "documentation":"

The maximum number of city-networks to monitor for your application. A city-network is the location (city) where clients access your application resources from and the ASN or network provider, such as an internet service provider (ISP), that clients access the resources through. Setting this limit can help control billing costs.

" }, "InternetMeasurementsLogDelivery":{ "shape":"InternetMeasurementsLogDelivery", @@ -1207,11 +1242,11 @@ }, "TrafficPercentageToMonitor":{ "shape":"TrafficPercentageToMonitor", - "documentation":"

The percentage of the internet-facing traffic for your application that you want to monitor with this monitor.

" + "documentation":"

The percentage of the internet-facing traffic for your application that you want to monitor with this monitor. If you set a city-networks maximum, that limit overrides the traffic percentage that you set.

To learn more, see Choosing an application traffic percentage to monitor in the Amazon CloudWatch Internet Monitor section of the CloudWatch User Guide.

" }, "HealthEventsConfig":{ "shape":"HealthEventsConfig", - "documentation":"

The list of health event thresholds. A health event threshold percentage, for performance and availability, determines when Internet Monitor creates a health event when there's an internet issue that affects your application end users.

" + "documentation":"

The list of health score thresholds. A threshold percentage for health scores, along with other configuration information, determines when Internet Monitor creates a health event when there's an internet issue that affects your application end users.

For more information, see Change health event thresholds in the Internet Monitor section of the CloudWatch User Guide.

" } } }, @@ -1245,5 +1280,5 @@ "exception":true } }, - "documentation":"

Amazon CloudWatch Internet Monitor provides visibility into how internet issues impact the performance and availability between your applications hosted on Amazon Web Services and your end users. It reduces the time it takes for you to diagnose internet issues from days to minutes. Internet Monitor uses the connectivity data that Amazon Web Services captures from its global networking footprint to calculate a baseline of performance and availability for internet traffic. This is the same data that Amazon Web Services uses to monitor internet uptime and availability. With those measurements as a baseline, Internet Monitor raises awareness for you when there are significant problems for your end users in the different geographic locations where your application runs.

Internet Monitor publishes internet measurements to CloudWatch Logs and CloudWatch Metrics, to easily support using CloudWatch tools with health information for geographies and networks specific to your application. Internet Monitor sends health events to Amazon EventBridge so that you can set up notifications. If an issue is caused by the Amazon Web Services network, you also automatically receive an Amazon Web Services Health Dashboard notification with the steps that Amazon Web Services is taking to mitigate the problem.

To use Internet Monitor, you create a monitor and associate your application's resources with it, VPCs, CloudFront distributions, or WorkSpaces directories, to enable Internet Monitor to know where your application's internet traffic is. Internet Monitor then provides internet measurements from Amazon Web Services that are specific to the locations and networks that communicate with your application.

For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide.

" + "documentation":"

Amazon CloudWatch Internet Monitor provides visibility into how internet issues impact the performance and availability between your applications hosted on Amazon Web Services and your end users. It can reduce the time it takes for you to diagnose internet issues from days to minutes. Internet Monitor uses the connectivity data that Amazon Web Services captures from its global networking footprint to calculate a baseline of performance and availability for internet traffic. This is the same data that Amazon Web Services uses to monitor internet uptime and availability. With those measurements as a baseline, Internet Monitor raises awareness for you when there are significant problems for your end users in the different geographic locations where your application runs.

Internet Monitor publishes internet measurements to CloudWatch Logs and CloudWatch Metrics, to easily support using CloudWatch tools with health information for geographies and networks specific to your application. Internet Monitor sends health events to Amazon EventBridge so that you can set up notifications. If an issue is caused by the Amazon Web Services network, you also automatically receive an Amazon Web Services Health Dashboard notification with the steps that Amazon Web Services is taking to mitigate the problem.

To use Internet Monitor, you create a monitor and associate your application's resources with it - VPCs, NLBs, CloudFront distributions, or WorkSpaces directories - so Internet Monitor can determine where your application's internet traffic is. Internet Monitor then provides internet measurements from Amazon Web Services that are specific to the locations and ASNs (typically, internet service providers or ISPs) that communicate with your application.

For more information, see Using Amazon CloudWatch Internet Monitor in the Amazon CloudWatch User Guide.

" } diff --git a/services/iot/pom.xml b/services/iot/pom.xml index 65175d3b45b..49a840587a1 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index 345db741056..d7fef23a201 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index 9b804f31be7..be8cd289fa6 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iot1clickprojects AWS Java SDK :: Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index 3257f9c6fac..d4917204760 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index 9c71b107772..3907df5f297 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index 9198d93a98d..d9329f3b343 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotdeviceadvisor AWS Java SDK :: 
Services :: Iot Device Advisor diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index 095cc9ea56d..fd0fb0d0722 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index d86f9302dd3..adba4e284f7 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index f5942d95292..809a9f85c9a 100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotfleethub AWS Java SDK :: Services :: Io T Fleet Hub diff --git a/services/iotfleetwise/pom.xml b/services/iotfleetwise/pom.xml index 26e85a0278c..7b12421d9a4 100644 --- a/services/iotfleetwise/pom.xml +++ b/services/iotfleetwise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotfleetwise AWS Java SDK :: Services :: Io T Fleet Wise diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index 0a478c6c67e..f5e4f9dbb0a 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotroborunner/pom.xml b/services/iotroborunner/pom.xml index b18056ba29c..1dab7a56f86 100644 --- a/services/iotroborunner/pom.xml +++ b/services/iotroborunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotroborunner AWS Java SDK 
:: Services :: IoT Robo Runner diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index 0a2ca5ed5ea..8dbff726202 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index 98085044812..98c61b98f5e 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index 4c0132b9891..68abfb48dfa 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iottwinmaker/pom.xml b/services/iottwinmaker/pom.xml index d38e5b47714..832a73a6296 100644 --- a/services/iottwinmaker/pom.xml +++ b/services/iottwinmaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iottwinmaker AWS Java SDK :: Services :: Io T Twin Maker diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index a09e368b455..6d9977ddbda 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index 20a61d57c49..c7c8008a62c 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff 
--git a/services/ivschat/pom.xml b/services/ivschat/pom.xml index 1ec01195a32..654e44d2c1a 100644 --- a/services/ivschat/pom.xml +++ b/services/ivschat/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ivschat AWS Java SDK :: Services :: Ivschat diff --git a/services/ivsrealtime/pom.xml b/services/ivsrealtime/pom.xml index 6e9dc94bb2c..655c74f7324 100644 --- a/services/ivsrealtime/pom.xml +++ b/services/ivsrealtime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ivsrealtime AWS Java SDK :: Services :: IVS Real Time diff --git a/services/ivsrealtime/src/main/resources/codegen-resources/endpoint-tests.json b/services/ivsrealtime/src/main/resources/codegen-resources/endpoint-tests.json index aa2d6d179c1..d51b2c7cff9 100644 --- a/services/ivsrealtime/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/ivsrealtime/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,54 +1,54 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ivsrealtime-fips.us-gov-east-1.api.aws" + "url": "https://ivsrealtime-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1", "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime-fips.us-gov-east-1.amazonaws.com" + "url": "https://ivsrealtime-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + 
"documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ivsrealtime.us-gov-east-1.api.aws" + "url": "https://ivsrealtime.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1", "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime.us-gov-east-1.amazonaws.com" + "url": "https://ivsrealtime.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1", "UseDualStack": false } }, @@ -60,8 +60,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -73,8 +73,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -86,8 +86,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -99,108 +99,108 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://ivsrealtime-fips.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime-fips.us-iso-east-1.c2s.ic.gov" 
+ "url": "https://ivsrealtime-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://ivsrealtime.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1", "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime.us-iso-east-1.c2s.ic.gov" + "url": "https://ivsrealtime.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://ivsrealtime-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime-fips.us-east-1.amazonaws.com" + "url": "https://ivsrealtime-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - 
"documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://ivsrealtime.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime.us-east-1.amazonaws.com" + "url": "https://ivsrealtime.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, @@ -210,8 +210,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -223,8 +223,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -234,8 +234,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -247,21 +247,34 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint 
with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -272,8 +285,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json b/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json index edc0b300714..2146dc2e7a9 100644 --- a/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json +++ b/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json @@ -407,7 +407,7 @@ "members":{ "errorCode":{ "shape":"EventErrorCode", - "documentation":"

If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the participant’s token is not allowed to do. For more information about participant capabilities, see the capabilities field in CreateParticipantToken.

" + "documentation":"

If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the participant’s token is not allowed to do. For more information about participant capabilities, see the capabilities field in CreateParticipantToken. QUOTA_EXCEEDED indicates that the number of participants who want to publish/subscribe to a stage exceeds the quota; for more information, see Service Quotas. PUBLISHER_NOT_FOUND indicates that the participant tried to subscribe to a publisher that doesn’t exist.

" }, "eventTime":{ "shape":"Time", @@ -430,7 +430,11 @@ }, "EventErrorCode":{ "type":"string", - "enum":["INSUFFICIENT_CAPABILITIES"] + "enum":[ + "INSUFFICIENT_CAPABILITIES", + "QUOTA_EXCEEDED", + "PUBLISHER_NOT_FOUND" + ] }, "EventList":{ "type":"list", @@ -1200,5 +1204,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"

Introduction

The Amazon Interactive Video Service (IVS) stage API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

Terminology:

  • The IVS stage API sometimes is referred to as the IVS RealTime API.

  • A participant token is an authorization token used to publish/subscribe to a stage.

  • A participant object represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID There is a 1:1 mapping between participant tokens and participants.

Resources

The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS):

  • Stage — A stage is a virtual space where multiple participants can exchange audio and video in real time.

Tagging

A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS stage API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

At most 50 tags can be applied to a resource.

Stages Endpoints

  • CreateParticipantToken — Creates an additional token for a specified stage. This can be done after stage creation or when tokens expire.

  • CreateStage — Creates a new stage (and optionally participant tokens).

  • DeleteStage — Shuts down and deletes the specified stage (disconnecting all participants).

  • DisconnectParticipant — Disconnects a specified participant and revokes the participant permanently from a specified stage.

  • GetParticipant — Gets information about the specified participant token.

  • GetStage — Gets information for the specified stage.

  • GetStageSession — Gets information for the specified stage session.

  • ListParticipantEvents — Lists events for a specified participant that occurred during a specified stage session.

  • ListParticipants — Lists all participants in a specified stage session.

  • ListStages — Gets summary information about all stages in your account, in the AWS region where the API request is processed.

  • ListStageSessions — Gets all sessions for a specified stage.

  • UpdateStage — Updates a stage’s configuration.

Tags Endpoints

  • ListTagsForResource — Gets information about AWS tags for the specified ARN.

  • TagResource — Adds or updates tags for the AWS resource with the specified ARN.

  • UntagResource — Removes tags from the resource with the specified ARN.

" + "documentation":"

Introduction

The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

Terminology:

  • A stage is a virtual space where participants can exchange video in real time.

  • A participant token is a token that authenticates a participant when they join a stage.

  • A participant object represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID There is a 1:1 mapping between participant tokens and participants.

Resources

The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS Real-Time Streaming):

  • Stage — A stage is a virtual space where participants can exchange video in real time.

Tagging

A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

At most 50 tags can be applied to a resource.

Stages Endpoints

  • CreateParticipantToken — Creates an additional token for a specified stage. This can be done after stage creation or when tokens expire.

  • CreateStage — Creates a new stage (and optionally participant tokens).

  • DeleteStage — Shuts down and deletes the specified stage (disconnecting all participants).

  • DisconnectParticipant — Disconnects a specified participant and revokes the participant permanently from a specified stage.

  • GetParticipant — Gets information about the specified participant token.

  • GetStage — Gets information for the specified stage.

  • GetStageSession — Gets information for the specified stage session.

  • ListParticipantEvents — Lists events for a specified participant that occurred during a specified stage session.

  • ListParticipants — Lists all participants in a specified stage session.

  • ListStages — Gets summary information about all stages in your account, in the AWS region where the API request is processed.

  • ListStageSessions — Gets all sessions for a specified stage.

  • UpdateStage — Updates a stage’s configuration.

Tags Endpoints

  • ListTagsForResource — Gets information about AWS tags for the specified ARN.

  • TagResource — Adds or updates tags for the AWS resource with the specified ARN.

  • UntagResource — Removes tags from the resource with the specified ARN.

" } diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 1c0812ebb9f..81207b99fe4 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafka/src/main/resources/codegen-resources/paginators-1.json b/services/kafka/src/main/resources/codegen-resources/paginators-1.json index 73982f3d55e..ff5b1292668 100644 --- a/services/kafka/src/main/resources/codegen-resources/paginators-1.json +++ b/services/kafka/src/main/resources/codegen-resources/paginators-1.json @@ -36,6 +36,12 @@ "limit_key": "MaxResults", "result_key": "ClusterOperationInfoList" }, + "ListClusterOperationsV2": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ClusterOperationInfoList" + }, "ListConfigurationRevisions": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/kafka/src/main/resources/codegen-resources/service-2.json b/services/kafka/src/main/resources/codegen-resources/service-2.json index 1807f88f78d..41b7fe32811 100644 --- a/services/kafka/src/main/resources/codegen-resources/service-2.json +++ b/services/kafka/src/main/resources/codegen-resources/service-2.json @@ -478,6 +478,44 @@ ], "documentation": "\n

Returns a description of the cluster operation specified by the ARN.

\n " }, + "DescribeClusterOperationV2" : { + "name" : "DescribeClusterOperationV2", + "http" : { + "method" : "GET", + "requestUri" : "/api/v2/operations/{clusterOperationArn}", + "responseCode" : 200 + }, + "input" : { + "shape" : "DescribeClusterOperationV2Request" + }, + "output" : { + "shape" : "DescribeClusterOperationV2Response", + "documentation" : "\n

HTTP Status Code 200: OK.

" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "\n

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, { + "shape" : "UnauthorizedException", + "documentation" : "\n

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "\n

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, { + "shape" : "ForbiddenException", + "documentation" : "\n

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + }, { + "shape" : "NotFoundException", + "documentation" : "\n

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, { + "shape" : "ServiceUnavailableException", + "documentation" : "\n

HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.

" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "\n

HTTP Status Code 429: Limit exceeded. Resource limit reached.

" + } ], + "documentation" : "\n

Returns a description of the cluster operation specified by the ARN.

\n" + }, "DescribeConfiguration": { "name": "DescribeConfiguration", "http": { @@ -795,6 +833,44 @@ ], "documentation": "\n

Returns a list of all the operations that have been performed on the specified MSK cluster.

\n " }, + "ListClusterOperationsV2" : { + "name" : "ListClusterOperationsV2", + "http" : { + "method" : "GET", + "requestUri" : "/api/v2/clusters/{clusterArn}/operations", + "responseCode" : 200 + }, + "input" : { + "shape" : "ListClusterOperationsV2Request" + }, + "output" : { + "shape" : "ListClusterOperationsV2Response", + "documentation" : "\n

HTTP Status Code 200: OK.

" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "\n

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

" + }, { + "shape" : "UnauthorizedException", + "documentation" : "\n

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "\n

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

" + }, { + "shape" : "ForbiddenException", + "documentation" : "\n

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

" + }, { + "shape" : "NotFoundException", + "documentation" : "\n

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

" + }, { + "shape" : "ServiceUnavailableException", + "documentation" : "\n

HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.

" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "\n

HTTP Status Code 429: Limit exceeded. Resource limit reached.

" + } ], + "documentation" : "\n

Returns a list of all the operations that have been performed on the specified MSK cluster.

\n " + }, "ListClusters": { "name": "ListClusters", "http": { @@ -2977,6 +3053,140 @@ } } }, + "ClusterOperationV2" : { + "type" : "structure", + "members" : { + "ClusterArn" : { + "shape" : "__string", + "locationName" : "clusterArn", + "documentation" : "\n

ARN of the cluster.

" + }, + "ClusterType" : { + "shape" : "ClusterType", + "locationName" : "clusterType", + "documentation" : "\n

Type of the backend cluster.

" + }, + "StartTime" : { + "shape" : "__timestampIso8601", + "locationName" : "startTime", + "documentation" : "\n

The time at which operation was started.

" + }, + "EndTime" : { + "shape" : "__timestampIso8601", + "locationName" : "endTime", + "documentation" : "\n

The time at which the operation finished.

" + }, + "ErrorInfo" : { + "shape" : "ErrorInfo", + "locationName" : "errorInfo", + "documentation" : "\n

If cluster operation failed from an error, it describes the error.

" + }, + "OperationArn" : { + "shape" : "__string", + "locationName" : "operationArn", + "documentation" : "\n

ARN of the cluster operation.

" + }, + "OperationState" : { + "shape" : "__string", + "locationName" : "operationState", + "documentation" : "\n

State of the cluster operation.

" + }, + "OperationType" : { + "shape" : "__string", + "locationName" : "operationType", + "documentation" : "\n

Type of the cluster operation.

" + }, + "Provisioned" : { + "shape" : "ClusterOperationV2Provisioned", + "locationName" : "provisioned", + "documentation" : "\n

Properties of a provisioned cluster.

" + }, + "Serverless" : { + "shape" : "ClusterOperationV2Serverless", + "locationName" : "serverless", + "documentation" : "\n

Properties of a serverless cluster.

" + } + }, + "documentation" : "\n

Returns information about a cluster operation.

" + }, + "ClusterOperationV2Provisioned" : { + "type" : "structure", + "members" : { + "OperationSteps" : { + "shape" : "__listOfClusterOperationStep", + "locationName" : "operationSteps", + "documentation" : "\n

Steps completed during the operation.

" + }, + "SourceClusterInfo" : { + "shape" : "MutableClusterInfo", + "locationName" : "sourceClusterInfo", + "documentation" : "\n

Information about cluster attributes before a cluster is updated.

" + }, + "TargetClusterInfo" : { + "shape" : "MutableClusterInfo", + "locationName" : "targetClusterInfo", + "documentation" : "\n

Information about cluster attributes after a cluster is updated.

" + }, + "VpcConnectionInfo" : { + "shape" : "VpcConnectionInfo", + "locationName" : "vpcConnectionInfo", + "documentation" : "\n

Description of the VPC connection for CreateVpcConnection and DeleteVpcConnection operations.

" + } + }, + "documentation" : "\n

Returns information about a provisioned cluster operation.

" + }, + "ClusterOperationV2Serverless" : { + "type" : "structure", + "members" : { + "VpcConnectionInfo" : { + "shape" : "VpcConnectionInfoServerless", + "locationName" : "vpcConnectionInfo", + "documentation" : "\n

Description of the VPC connection for CreateVpcConnection and DeleteVpcConnection operations.

" + } + }, + "documentation" : "\n

Returns information about a serverless cluster operation.

" + }, + "ClusterOperationV2Summary" : { + "type" : "structure", + "members" : { + "ClusterArn" : { + "shape" : "__string", + "locationName" : "clusterArn", + "documentation" : "\n

ARN of the cluster.

" + }, + "ClusterType" : { + "shape" : "ClusterType", + "locationName" : "clusterType", + "documentation" : "\n

Type of the backend cluster.

" + }, + "StartTime" : { + "shape" : "__timestampIso8601", + "locationName" : "startTime", + "documentation" : "\n

The time at which operation was started.

" + }, + "EndTime" : { + "shape" : "__timestampIso8601", + "locationName" : "endTime", + "documentation" : "\n

The time at which the operation finished.

" + }, + "OperationArn" : { + "shape" : "__string", + "locationName" : "operationArn", + "documentation" : "\n

ARN of the cluster operation.

" + }, + "OperationState" : { + "shape" : "__string", + "locationName" : "operationState", + "documentation" : "\n

State of the cluster operation.

" + }, + "OperationType" : { + "shape" : "__string", + "locationName" : "operationType", + "documentation" : "\n

Type of the cluster operation.

" + } + }, + "documentation" : "\n

Returns information about a cluster operation.

" + }, "DeleteClusterRequest": { "type": "structure", "members": { @@ -3103,6 +3313,18 @@ "ClusterOperationArn" ] }, + "DescribeClusterOperationV2Request" : { + "type" : "structure", + "members" : { + "ClusterOperationArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "clusterOperationArn", + "documentation" : "ARN of the cluster operation to describe." + } + }, + "required" : [ "ClusterOperationArn" ] + }, "DescribeClusterOperationResponse": { "type": "structure", "members": { @@ -3113,6 +3335,16 @@ } } }, + "DescribeClusterOperationV2Response" : { + "type" : "structure", + "members" : { + "ClusterOperationInfo" : { + "shape" : "ClusterOperationV2", + "locationName" : "clusterOperationInfo", + "documentation" : "\n

Cluster operation information

" + } + } + }, "DescribeClusterRequest": { "type": "structure", "members": { @@ -3690,6 +3922,30 @@ "ClusterArn" ] }, + "ListClusterOperationsV2Request" : { + "type" : "structure", + "members" : { + "ClusterArn" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "clusterArn", + "documentation" : "The arn of the cluster whose operations are being requested." + }, + "MaxResults" : { + "shape" : "MaxResults", + "location" : "querystring", + "locationName" : "maxResults", + "documentation" : "The maxResults of the query." + }, + "NextToken" : { + "shape" : "__string", + "location" : "querystring", + "locationName" : "nextToken", + "documentation" : "The nextToken of the query." + } + }, + "required" : [ "ClusterArn" ] + }, "ListClusterOperationsResponse": { "type": "structure", "members": { @@ -3705,6 +3961,21 @@ } } }, + "ListClusterOperationsV2Response" : { + "type" : "structure", + "members" : { + "ClusterOperationInfoList" : { + "shape" : "__listOfClusterOperationV2Summary", + "locationName" : "clusterOperationInfoList", + "documentation" : "\n

An array of cluster operation information objects.

" + }, + "NextToken" : { + "shape" : "__string", + "locationName" : "nextToken", + "documentation" : "\n

If the response of ListClusterOperationsV2 is truncated, it returns a NextToken in the response. This NextToken should be sent in the subsequent request to ListClusterOperationsV2.

" + } + } + }, "ListClustersRequest": { "type": "structure", "members": { @@ -5195,6 +5466,32 @@ }, "documentation": "\n

Description of the VPC connection.

\n " }, + "VpcConnectionInfoServerless" : { + "type" : "structure", + "members" : { + "CreationTime" : { + "shape" : "__timestampIso8601", + "locationName" : "creationTime", + "documentation" : "\n

The time when Amazon MSK creates the VPC Connnection.

" + }, + "Owner" : { + "shape" : "__string", + "locationName" : "owner", + "documentation" : "\n

The owner of the VPC Connection.

" + }, + "UserIdentity" : { + "shape" : "UserIdentity", + "locationName" : "userIdentity", + "documentation" : "\n

Description of the requester that calls the API operation.

" + }, + "VpcConnectionArn" : { + "shape" : "__string", + "locationName" : "vpcConnectionArn", + "documentation" : "\n

The Amazon Resource Name (ARN) of the VPC connection.

" + } + }, + "documentation" : "Description of the VPC connection." + }, "VpcConnectionState": { "type": "string", "documentation": "\n

The state of a VPC connection.

\n ", @@ -5297,6 +5594,12 @@ "shape": "ClusterOperationInfo" } }, + "__listOfClusterOperationV2Summary" : { + "type" : "list", + "member" : { + "shape" : "ClusterOperationV2Summary" + } + }, "__listOfClusterOperationStep" : { "type" : "list", "member" : { diff --git a/services/kafkaconnect/pom.xml b/services/kafkaconnect/pom.xml index 72dd89e7c6a..1ca2ce35079 100644 --- a/services/kafkaconnect/pom.xml +++ b/services/kafkaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kafkaconnect AWS Java SDK :: Services :: Kafka Connect diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index 147c474888e..b73589c87be 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendraranking/pom.xml b/services/kendraranking/pom.xml index e20b2c92494..f20d54b2bbd 100644 --- a/services/kendraranking/pom.xml +++ b/services/kendraranking/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kendraranking AWS Java SDK :: Services :: Kendra Ranking diff --git a/services/keyspaces/pom.xml b/services/keyspaces/pom.xml index d26addda132..4d16be2e20c 100644 --- a/services/keyspaces/pom.xml +++ b/services/keyspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT keyspaces AWS Java SDK :: Services :: Keyspaces diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 6fcebaf17d6..9dc90d1215c 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index 967ab319033..e76049d9645 100644 --- a/services/kinesisanalytics/pom.xml +++ 
b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index 7552590e154..702a9f6c54b 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index 6f2c886075a..296f355b1af 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 kinesisvideo diff --git a/services/kinesisvideo/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/kinesisvideo/src/main/resources/codegen-resources/endpoint-rule-set.json index f61682a8f1f..6da12c24ec7 100644 --- a/services/kinesisvideo/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/kinesisvideo/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": 
{}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": 
"endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": 
[], - "endpoint": { - "url": "https://kinesisvideo.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://kinesisvideo.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://kinesisvideo.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://kinesisvideo.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json b/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json index 489bea376b0..2f761ae0947 100644 --- a/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json +++ b/services/kinesisvideo/src/main/resources/codegen-resources/service-2.json @@ -462,7 +462,7 @@ {"shape":"AccessDeniedException"}, {"shape":"NoDataRetentionException"} ], - "documentation":"

Associates a SignalingChannel to a stream to store the media. There are two signaling modes that can specified :

  • If the StorageStatus is disabled, no data will be stored, and the StreamARN parameter will not be needed.

  • If the StorageStatus is enabled, the data will be stored in the StreamARN provided.

" + "documentation":"

Associates a SignalingChannel to a stream to store the media. There are two signaling modes that can specified :

  • If the StorageStatus is disabled, no data will be stored, and the StreamARN parameter will not be needed.

  • If the StorageStatus is enabled, the data will be stored in the StreamARN provided.

If StorageStatus is enabled, direct peer-to-peer (master-viewer) connections no longer occur. Peers connect directly to the storage session. You must call the JoinStorageSession API to trigger an SDP offer send and establish a connection between a peer and the storage session.

" }, "UpdateNotificationConfiguration":{ "name":"UpdateNotificationConfiguration", @@ -1754,11 +1754,7 @@ "max":50, "min":1 }, - "SamplingInterval":{ - "type":"integer", - "max":20000, - "min":3000 - }, + "SamplingInterval":{"type":"integer"}, "ScheduleConfig":{ "type":"structure", "required":[ diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index a906003899f..cb479896aee 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/endpoint-rule-set.json index 33f59063d56..f61682a8f1f 100644 --- a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom 
endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] 
+ }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + 
"conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://kinesisvideo.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://kinesisvideo.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://kinesisvideo.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://kinesisvideo.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/endpoint-tests.json b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/endpoint-tests.json index 
286590f5a81..3e71d1d389b 100644 --- a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,1191 +1,402 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with 
FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and 
DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": true, - "Region": "us-iso-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-iso-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseDualStack": true, - "Region": "us-iso-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-iso-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled 
and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://kinesisvideo.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-west-3.api.aws" - } - }, - "params": { - 
"UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { 
- "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - 
"documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For 
region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kinesisvideo.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.ap-east-1.api.aws" + "url": "https://kinesisvideo.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.ap-east-1.amazonaws.com" + "url": "https://kinesisvideo.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, "Region": "ap-east-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - 
"documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.ap-east-1.api.aws" + "url": "https://kinesisvideo.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.ap-east-1.amazonaws.com" + "url": "https://kinesisvideo.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://kinesisvideo.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.cn-north-1.amazonaws.com.cn" + "url": "https://kinesisvideo.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS 
disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://kinesisvideo.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.cn-north-1.amazonaws.com.cn" + "url": "https://kinesisvideo.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.us-gov-west-1.api.aws" + "url": "https://kinesisvideo.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.us-gov-west-1.amazonaws.com" + "url": "https://kinesisvideo.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + 
"documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.us-gov-west-1.api.aws" + "url": "https://kinesisvideo.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.us-gov-west-1.amazonaws.com" + "url": "https://kinesisvideo.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.ap-southeast-1.api.aws" + "url": "https://kinesisvideo.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.ap-southeast-1.amazonaws.com" + "url": "https://kinesisvideo.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack 
disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.ap-southeast-1.api.aws" + "url": "https://kinesisvideo.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.ap-southeast-1.amazonaws.com" + "url": "https://kinesisvideo.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.ap-southeast-2.api.aws" + "url": "https://kinesisvideo-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.ap-southeast-2.amazonaws.com" + "url": "https://kinesisvideo-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://kinesisvideo.ap-southeast-2.api.aws" + "url": "https://kinesisvideo.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.ap-southeast-2.amazonaws.com" + "url": "https://kinesisvideo-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://kinesisvideo-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://kinesisvideo.cn-north-1.api.amazonwebservices.com.cn" + } }, "params": { - "UseDualStack": true, - "Region": "us-iso-east-1", - "UseFIPS": false + 
"Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.us-iso-east-1.c2s.ic.gov" + "url": "https://kinesisvideo.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "us-iso-east-1", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.us-east-1.api.aws" + "url": "https://kinesisvideo-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.us-east-1.amazonaws.com" + "url": "https://kinesisvideo-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.us-east-1.api.aws" + "url": "https://kinesisvideo.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - 
"documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.us-east-1.amazonaws.com" + "url": "https://kinesisvideo.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://kinesisvideo-fips.us-east-2.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo-fips.us-east-2.amazonaws.com" + "url": "https://kinesisvideo-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://kinesisvideo.us-east-2.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For 
region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kinesisvideo.us-east-2.amazonaws.com" + "url": "https://kinesisvideo.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -1194,9 +405,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -1207,9 +418,9 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -1218,9 +429,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -1231,22 +442,35 @@ } }, "params": { - "UseDualStack": false, "Region": "us-isob-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1256,9 +480,9 @@ "error": "Invalid Configuration: FIPS and custom 
endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1268,11 +492,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json index a074f379800..88ff5a3f413 100644 --- a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json +++ b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/service-2.json @@ -30,7 +30,7 @@ {"shape":"InvalidMediaFrameException"}, {"shape":"NoDataRetentionException"} ], - "documentation":"

Downloads an MP4 file (clip) containing the archived, on-demand media from the specified video stream over the specified time range.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

As a prerequisite to using GetCLip API, you must obtain an endpoint using GetDataEndpoint, specifying GET_CLIP for the APIName parameter.

An Amazon Kinesis video stream has the following requirements for providing data through MP4:

  • The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.

You can monitor the amount of outgoing data by monitoring the GetClip.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for outgoing AWS data apply.

" + "documentation":"

Downloads an MP4 file (clip) containing the archived, on-demand media from the specified video stream over the specified time range.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

As a prerequisite to using GetClip API, you must obtain an endpoint using GetDataEndpoint, specifying GET_CLIP for the APIName parameter.

An Amazon Kinesis video stream has the following requirements for providing data through MP4:

  • The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.

You can monitor the amount of outgoing data by monitoring the GetClip.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and Amazon Web Services Pricing. Charges for outgoing Amazon Web Services data apply.

" }, "GetDASHStreamingSessionURL":{ "name":"GetDASHStreamingSessionURL", @@ -50,7 +50,7 @@ {"shape":"MissingCodecPrivateDataException"}, {"shape":"InvalidCodecPrivateDataException"} ], - "documentation":"

Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:

  • The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.

The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).

    Don't share or store this token where an unauthorized entity can access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you use with your AWS credentials.

    The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to playback.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"fytp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:

  • The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for G.711).

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.

The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).

    Don't share or store this token where an unauthorized entity can access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you use with your Amazon Web Services credentials.

    The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to playback.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and Amazon Web Services Pricing. Charges for both HLS sessions and outgoing Amazon Web Services data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to Amazon Web Services the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" }, "GetHLSStreamingSessionURL":{ "name":"GetHLSStreamingSessionURL", @@ -70,7 +70,7 @@ {"shape":"MissingCodecPrivateDataException"}, {"shape":"InvalidCodecPrivateDataException"} ], - "documentation":"

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

  • The media must contain h.264 or h.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEG/ISO/HEVC (for h.265). Optionally, the codec ID of track 2 should be A_AAC.

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.

      Data retrieved with this action is billable. See Pricing for details.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

A streaming session URL must not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.

Both the StreamName and the StreamARN parameters are optional, but you must specify either the StreamName or the StreamARN when invoking this API operation.

An Amazon Kinesis video stream has the following requirements for providing data through HLS:

  • For streaming video, the media must contain H.264 or H.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC (for H.264) or V_MPEG/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC. For audio only streaming, the codec ID of track 1 should be A_AAC.

  • Data retention must be greater than 0.

  • The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.

  • The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.

The following procedure shows how to use HLS with Kinesis Video Streams:

  1. Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter.

  2. Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).

    Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your Amazon Web Services credentials.

    The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.

  3. Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.

  4. The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:

    • GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist action for each track, and additional metadata for the media player, including estimated bitrate and resolution.

    • GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.

    • GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp\" and \"moov\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.

      The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.

    • GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof\" and \"mdat\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.

      For the HLS streaming session, in-track codec private data (CPD) changes are supported. After the first media fragment is made available in a streaming session, fragments can contain CPD changes for each track. Therefore, the fragments in a session can have a different resolution, bit rate, or other information in the CPD without interrupting playback. However, any change made in the track number or track codec format can return an error when those different media fragments are loaded. For example, streaming will fail if the fragments in the stream change from having only video to having both audio and video, or if an AAC audio track is changed to an ALAW audio track. For each streaming session, only 500 CPD changes are allowed.

      Data retrieved with this action is billable. For information, see Pricing.

    • GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.

      If the ContainerFormat is MPEG_TS, this API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve stream media.

      Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.

A streaming session URL must not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.

You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and Amazon Web Services Pricing. Charges for both HLS sessions and outgoing Amazon Web Services data apply.

For more information about HLS, see HTTP Live Streaming on the Apple Developer site.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to Amazon Web Services, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" }, "GetImages":{ "name":"GetImages", @@ -102,7 +102,7 @@ {"shape":"ClientLimitExceededException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

For limits, see Kinesis Video Streams Limits.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.

You must first call the GetDataEndpoint API to get an endpoint. Then send the GetMediaForFragmentList requests to this endpoint using the --endpoint-url parameter.

For limits, see Kinesis Video Streams Limits.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to Amazon Web Services, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" }, "ListFragments":{ "name":"ListFragments", @@ -118,7 +118,7 @@ {"shape":"ClientLimitExceededException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Returns a list of Fragment objects from the specified stream and timestamp range within the archived data.

Listing fragments is eventually consistent. This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to ListFragments. However, results are typically available in less than one second.

You must first call the GetDataEndpoint API to get an endpoint. Then send the ListFragments requests to this endpoint using the --endpoint-url parameter.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" + "documentation":"

Returns a list of Fragment objects from the specified stream and timestamp range within the archived data.

Listing fragments is eventually consistent. This means that even if the producer receives an acknowledgment that a fragment is persisted, the result might not be returned immediately from a request to ListFragments. However, results are typically available in less than one second.

You must first call the GetDataEndpoint API to get an endpoint. Then send the ListFragments requests to this endpoint using the --endpoint-url parameter.

If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:

  • x-amz-ErrorType HTTP header – contains a more specific error type in addition to what the HTTP status code provides.

  • x-amz-RequestId HTTP header – if you want to report an issue to Amazon Web Services, the support team can better diagnose the problem if given the Request Id.

Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.

For more information, see the Errors section at the bottom of this topic, as well as Common Errors.

" } }, "shapes":{ @@ -297,7 +297,7 @@ }, "ServerTimestamp":{ "shape":"Timestamp", - "documentation":"

The timestamp from the AWS server corresponding to the fragment.

" + "documentation":"

The timestamp from the Amazon Web Services server corresponding to the fragment.

" }, "FragmentLengthInMilliseconds":{ "shape":"Long", @@ -483,7 +483,6 @@ "ImageSelectorType", "StartTimestamp", "EndTimestamp", - "SamplingInterval", "Format" ], "members":{ @@ -505,11 +504,11 @@ }, "EndTimestamp":{ "shape":"Timestamp", - "documentation":"

The end timestamp for the range of images to be generated.

" + "documentation":"

The end timestamp for the range of images to be generated. If the time range between StartTimestamp and EndTimestamp is more than 300 seconds above StartTimestamp, you will receive an IllegalArgumentException.

" }, "SamplingInterval":{ "shape":"SamplingInterval", - "documentation":"

The time interval in milliseconds (ms) at which the images need to be generated from the stream. The minimum value that can be provided is 3000 ms. If the timestamp range is less than the sampling interval, the Image from the startTimestamp will be returned if available.

The minimum value of 3000 ms is a soft limit. If needed, a lower sampling frequency can be requested.

" + "documentation":"

The time interval in milliseconds (ms) at which the images need to be generated from the stream, with a default of 3000 ms. The minimum value that can be provided is 200 ms. If the timestamp range is less than the sampling interval, the Image from the startTimestamp will be returned if available.

The minimum value of 200 ms is a hard limit.

" }, "Format":{ "shape":"Format", @@ -529,7 +528,7 @@ }, "MaxResults":{ "shape":"GetImagesMaxResults", - "documentation":"

The maximum number of images to be returned by the API.

The default limit is 100 images per API response. The additional results will be paginated.

" + "documentation":"

The maximum number of images to be returned by the API.

The default limit is 25 images per API response. Providing a MaxResults greater than this value will result in a page size of 25. Any additional results will be paginated.

" }, "NextToken":{ "shape":"NextToken", @@ -584,7 +583,7 @@ }, "Payload":{ "shape":"Payload", - "documentation":"

The payload that Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the GetMediaForFragmentList call also include the following additional Matroska (MKV) tags:

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.

  • AWS_KINESISVIDEO_SERVER_SIDE_TIMESTAMP - Server-side timestamp of the fragment.

  • AWS_KINESISVIDEO_PRODUCER_SIDE_TIMESTAMP - Producer-side timestamp of the fragment.

The following tags will be included if an exception occurs:

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - The number of the fragment that threw the exception

  • AWS_KINESISVIDEO_EXCEPTION_ERROR_CODE - The integer code of the exception

  • AWS_KINESISVIDEO_EXCEPTION_MESSAGE - A text description of the exception

" + "documentation":"

The payload that Kinesis Video Streams returns is a sequence of chunks from the specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the GetMediaForFragmentList call also include the following additional Matroska (MKV) tags:

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.

  • AWS_KINESISVIDEO_SERVER_SIDE_TIMESTAMP - Server-side timestamp of the fragment.

  • AWS_KINESISVIDEO_PRODUCER_SIDE_TIMESTAMP - Producer-side timestamp of the fragment.

The following tags will be included if an exception occurs:

  • AWS_KINESISVIDEO_FRAGMENT_NUMBER - The number of the fragment that threw the exception

  • AWS_KINESISVIDEO_EXCEPTION_ERROR_CODE - The integer code of the exception

  • AWS_KINESISVIDEO_EXCEPTION_MESSAGE - A text description of the exception

" } }, "payload":"Payload" @@ -818,15 +817,11 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

GetMedia throws this error when Kinesis Video Streams can't find the stream that you specified.

GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAY is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.

", + "documentation":"

GetImages will throw this error when Kinesis Video Streams can't find the stream that you specified.

GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAY is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.

", "error":{"httpStatusCode":404}, "exception":true }, - "SamplingInterval":{ - "type":"integer", - "max":20000, - "min":3000 - }, + "SamplingInterval":{"type":"integer"}, "StreamName":{ "type":"string", "max":256, diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index 5b4c0eb7e1c..55832fdd3b6 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index aac083a99ee..8756eafe475 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kinesisvideowebrtcstorage/pom.xml b/services/kinesisvideowebrtcstorage/pom.xml index 164e2d9151c..ef9939dacb9 100644 --- a/services/kinesisvideowebrtcstorage/pom.xml +++ b/services/kinesisvideowebrtcstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kinesisvideowebrtcstorage AWS Java SDK :: Services :: Kinesis Video Web RTC Storage diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 6eafc554d12..5aec2c2d6cc 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index b0afbae0486..bd50a703a6b 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git 
a/services/lambda/pom.xml b/services/lambda/pom.xml index 55e7cddccda..a474099293a 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lambda/src/main/resources/codegen-resources/service-2.json b/services/lambda/src/main/resources/codegen-resources/service-2.json index cce6d7d736b..2b93c96071f 100644 --- a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -5229,7 +5229,8 @@ "nodejs18.x", "python3.10", "java17", - "ruby3.2" + "ruby3.2", + "python3.11" ] }, "RuntimeVersionArn":{ diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index 5c8382cfeec..83d194f193e 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index 54c13530950..da511654ac9 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index d6f4ac06d11..67b37cfdd20 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index 3bfbdb9faca..b3912070dad 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index 6872f431f53..d163efe6f1a 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/licensemanagerlinuxsubscriptions/pom.xml b/services/licensemanagerlinuxsubscriptions/pom.xml index c8e3f0b7bef..c0ddca90edb 100644 --- a/services/licensemanagerlinuxsubscriptions/pom.xml +++ b/services/licensemanagerlinuxsubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT licensemanagerlinuxsubscriptions AWS Java SDK :: Services :: License Manager Linux Subscriptions diff --git a/services/licensemanagerusersubscriptions/pom.xml b/services/licensemanagerusersubscriptions/pom.xml index 3266a88b9f6..e34ed4a7743 100644 --- a/services/licensemanagerusersubscriptions/pom.xml +++ b/services/licensemanagerusersubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT licensemanagerusersubscriptions AWS Java SDK :: Services :: License Manager User Subscriptions diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index f148c287e08..53095b2a665 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/location/pom.xml b/services/location/pom.xml index 888e60c574f..4001be5f9ed 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT location AWS Java SDK :: Services :: Location diff --git 
a/services/lookoutequipment/pom.xml b/services/lookoutequipment/pom.xml index 975e8ebeaf9..381a2b63c8d 100644 --- a/services/lookoutequipment/pom.xml +++ b/services/lookoutequipment/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lookoutequipment AWS Java SDK :: Services :: Lookout Equipment diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json index 11910d47fac..00e0ece3871 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,64 +45,17 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "ref": "UseFIPS" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + true ] } - ] + ], + "error": "Invalid 
Configuration: FIPS and custom endpoint are not supported", + "type": "error" }, { "conditions": [ @@ -111,19 +63,51 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "booleanEquals", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -133,90 +117,109 @@ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://lookoutequipment-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://lookoutequipment-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -229,78 +232,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://lookoutequipment.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": 
"https://lookoutequipment.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://lookoutequipment.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://lookoutequipment.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-tests.json b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-tests.json index 6128fb14740..4cc41e239d7 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/lookoutequipment/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,163 +1,299 @@ { "testCases": [ { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment.eu-west-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and 
DualStack disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.eu-west-1.api.aws" + "url": "https://lookoutequipment-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.eu-west-1.amazonaws.com" + "url": "https://lookoutequipment-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.eu-west-1.api.aws" + "url": "https://lookoutequipment.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-1" + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.eu-west-1.amazonaws.com" + "url": "https://lookoutequipment-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack 
disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://lookoutequipment.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.ap-northeast-2.api.aws" + "url": "https://lookoutequipment-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.ap-northeast-2.amazonaws.com" + "url": "https://lookoutequipment-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { 
"endpoint": { - "url": "https://lookoutequipment.ap-northeast-2.api.aws" + "url": "https://lookoutequipment.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-2" + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.ap-northeast-2.amazonaws.com" + "url": "https://lookoutequipment.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment-fips.us-east-1.api.aws" + "url": "https://lookoutequipment-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://lookoutequipment-fips.us-east-1.amazonaws.com" + "url": "https://lookoutequipment.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.us-east-1.api.aws" + "url": "https://lookoutequipment-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://lookoutequipment.us-east-1.amazonaws.com" + "url": "https://lookoutequipment.us-isob-east-1.sc2s.sgov.gov" } }, "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", "UseFIPS": false, 
"UseDualStack": false, - "Region": "us-east-1" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -166,7 +302,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -176,9 +311,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -188,11 +323,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/paginators-1.json b/services/lookoutequipment/src/main/resources/codegen-resources/paginators-1.json index 25be4169143..18e13f7d3a9 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/paginators-1.json +++ b/services/lookoutequipment/src/main/resources/codegen-resources/paginators-1.json @@ -35,6 +35,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListModelVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListModels": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json b/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json index a98b749de4b..e6a07920eaa 100644 --- 
a/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json +++ b/services/lookoutequipment/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a container for a collection of data being ingested for analysis. The dataset contains the metadata describing where the data is and what the data actually looks like. In other words, it contains the location of the data source, the data schema, and other information. A dataset also contains any tags associated with the ingested data.

" + "documentation":"

Creates a container for a collection of data being ingested for analysis. The dataset contains the metadata describing where the data is and what the data actually looks like. For example, it contains the location of the data source, the data schema, and other information. A dataset also contains any tags associated with the ingested data.

" }, "CreateInferenceScheduler":{ "name":"CreateInferenceScheduler", @@ -191,6 +191,23 @@ ], "documentation":"

Deletes an ML model currently available for Amazon Lookout for Equipment. This will prevent it from being used with an inference scheduler, even one that is already set up.

" }, + "DeleteResourcePolicy":{ + "name":"DeleteResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourcePolicyRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes the resource policy attached to the resource.

" + }, "DescribeDataIngestionJob":{ "name":"DescribeDataIngestionJob", "http":{ @@ -293,6 +310,78 @@ ], "documentation":"

Provides a JSON containing the overall information about a specific ML model, including model name and ARN, dataset, training and evaluation information, status, and so on.

" }, + "DescribeModelVersion":{ + "name":"DescribeModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeModelVersionRequest"}, + "output":{"shape":"DescribeModelVersionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves information about a specific machine learning model version.

" + }, + "DescribeResourcePolicy":{ + "name":"DescribeResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourcePolicyRequest"}, + "output":{"shape":"DescribeResourcePolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Provides the details of a resource policy attached to a resource.

" + }, + "ImportDataset":{ + "name":"ImportDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportDatasetRequest"}, + "output":{"shape":"ImportDatasetResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Imports a dataset.

" + }, + "ImportModelVersion":{ + "name":"ImportModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ImportModelVersionRequest"}, + "output":{"shape":"ImportModelVersionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Imports a model that has been trained successfully.

" + }, "ListDataIngestionJobs":{ "name":"ListDataIngestionJobs", "http":{ @@ -407,6 +496,23 @@ ], "documentation":"

Provides a list of labels.

" }, + "ListModelVersions":{ + "name":"ListModelVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListModelVersionsRequest"}, + "output":{"shape":"ListModelVersionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Generates a list of all model versions for a given model, including the model version, model version ARN, and status. To list a subset of versions, use the MaxModelVersion and MinModelVersion fields.

" + }, "ListModels":{ "name":"ListModels", "http":{ @@ -457,6 +563,25 @@ ], "documentation":"

Lists all the tags for a specified resource, including key and value.

" }, + "PutResourcePolicy":{ + "name":"PutResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutResourcePolicyRequest"}, + "output":{"shape":"PutResourcePolicyResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a resource control policy for a given resource.

" + }, "StartDataIngestionJob":{ "name":"StartDataIngestionJob", "http":{ @@ -547,6 +672,24 @@ ], "documentation":"

Removes a specific tag from a given resource. The tag is specified by its key.

" }, + "UpdateActiveModelVersion":{ + "name":"UpdateActiveModelVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateActiveModelVersionRequest"}, + "output":{"shape":"UpdateActiveModelVersionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Sets the active model version for a given machine learning model.

" + }, "UpdateInferenceScheduler":{ "name":"UpdateInferenceScheduler", "http":{ @@ -818,7 +961,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"

The ARN of the label group that you have created.

" + "documentation":"

The Amazon Resource Name (ARN) of the label group that you have created.

" } } }, @@ -1082,7 +1225,8 @@ "enum":[ "CREATED", "INGESTION_IN_PROGRESS", - "ACTIVE" + "ACTIVE", + "IMPORT_IN_PROGRESS" ] }, "DatasetSummaries":{ @@ -1168,6 +1312,16 @@ } } }, + "DeleteResourcePolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource for which the resource policy should be deleted.

" + } + } + }, "DescribeDataIngestionJobRequest":{ "type":"structure", "required":["JobId"], @@ -1229,6 +1383,10 @@ "DataEndTime":{ "shape":"Timestamp", "documentation":"

Indicates the latest timestamp corresponding to data that was successfully ingested during this specific ingestion job.

" + }, + "SourceDatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the source dataset from which the data used for the data ingestion job was imported from.

" } } }, @@ -1297,6 +1455,10 @@ "DataEndTime":{ "shape":"Timestamp", "documentation":"

Indicates the latest timestamp corresponding to data that was successfully ingested during the most recent ingestion of this particular dataset.

" + }, + "SourceDatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the source dataset from which the current data being described was imported from.

" } } }, @@ -1390,7 +1552,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"

The ARN of the label group.

" + "documentation":"

The Amazon Resource Name (ARN) of the label group.

" }, "FaultCodes":{ "shape":"FaultCodes", @@ -1432,7 +1594,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"

The ARN of the requested label group.

" + "documentation":"

The Amazon Resource Name (ARN) of the requested label group.

" }, "LabelId":{ "shape":"LabelId", @@ -1566,6 +1728,201 @@ "OffCondition":{ "shape":"OffCondition", "documentation":"

Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.

" + }, + "SourceModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the source model version. This field appears if the active model version was imported.

" + }, + "ImportJobStartTime":{ + "shape":"Timestamp", + "documentation":"

The date and time when the import job was started. This field appears if the active model version was imported.

" + }, + "ImportJobEndTime":{ + "shape":"Timestamp", + "documentation":"

The date and time when the import job was completed. This field appears if the active model version was imported.

" + }, + "ActiveModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The name of the model version used by the inference scheduler when running a scheduled inference execution.

" + }, + "ActiveModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the model version used by the inference scheduler when running a scheduled inference execution.

" + }, + "ModelVersionActivatedAt":{ + "shape":"Timestamp", + "documentation":"

The date the active model version was activated.

" + }, + "PreviousActiveModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The model version that was set as the active model version prior to the current active model version.

" + }, + "PreviousActiveModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the model version that was set as the active model version prior to the current active model version.

" + }, + "PreviousModelVersionActivatedAt":{ + "shape":"Timestamp", + "documentation":"

The date and time when the previous active model version was activated.

" + } + } + }, + "DescribeModelVersionRequest":{ + "type":"structure", + "required":[ + "ModelName", + "ModelVersion" + ], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the machine learning model that this version belongs to.

" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the machine learning model.

" + } + } + }, + "DescribeModelVersionResponse":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the machine learning model that this version belongs to.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the parent machine learning model that this version belong to.

" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the machine learning model.

" + }, + "ModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the model version.

" + }, + "Status":{ + "shape":"ModelVersionStatus", + "documentation":"

The current status of the model version.

" + }, + "SourceType":{ + "shape":"ModelVersionSourceType", + "documentation":"

Indicates whether this model version was created by training or by importing.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset used to train the model version.

" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset used to train the model version.

" + }, + "Schema":{ + "shape":"InlineDataSchema", + "documentation":"

The schema of the data used to train the model version.

" + }, + "LabelsInputConfiguration":{"shape":"LabelsInputConfiguration"}, + "TrainingDataStartTime":{ + "shape":"Timestamp", + "documentation":"

The date on which the training data began being gathered. If you imported the version, this is the date that the training data in the source version began being gathered.

" + }, + "TrainingDataEndTime":{ + "shape":"Timestamp", + "documentation":"

The date on which the training data finished being gathered. If you imported the version, this is the date that the training data in the source version finished being gathered.

" + }, + "EvaluationDataStartTime":{ + "shape":"Timestamp", + "documentation":"

The date on which the data in the evaluation set began being gathered. If you imported the version, this is the date that the evaluation set data in the source version began being gathered.

" + }, + "EvaluationDataEndTime":{ + "shape":"Timestamp", + "documentation":"

The date on which the data in the evaluation set finished being gathered. If you imported the version, this is the date that the evaluation set data in the source version finished being gathered.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the role that was used to train the model version.

" + }, + "DataPreProcessingConfiguration":{"shape":"DataPreProcessingConfiguration"}, + "TrainingExecutionStartTime":{ + "shape":"Timestamp", + "documentation":"

The time when the training of the version began.

" + }, + "TrainingExecutionEndTime":{ + "shape":"Timestamp", + "documentation":"

The time when the training of the version completed.

" + }, + "FailedReason":{ + "shape":"BoundedLengthString", + "documentation":"

The failure message if the training of the model version failed.

" + }, + "ModelMetrics":{ + "shape":"ModelMetrics", + "documentation":"

Shows an aggregated summary, in JSON format, of the model's performance within the evaluation time range. These metrics are created when evaluating the model.

" + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the last time the machine learning model version was updated.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

Indicates the time and date at which the machine learning model version was created.

" + }, + "ServerSideKmsKeyId":{ + "shape":"KmsKeyArn", + "documentation":"

The identifier of the KMS key used to encrypt model version data by Amazon Lookout for Equipment.

" + }, + "OffCondition":{ + "shape":"OffCondition", + "documentation":"

Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.

" + }, + "SourceModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

If model version was imported, then this field is the arn of the source model version.

" + }, + "ImportJobStartTime":{ + "shape":"Timestamp", + "documentation":"

The date and time when the import job began. This field appears if the model version was imported.

" + }, + "ImportJobEndTime":{ + "shape":"Timestamp", + "documentation":"

The date and time when the import job completed. This field appears if the model version was imported.

" + }, + "ImportedDataSizeInBytes":{ + "shape":"DataSizeInBytes", + "documentation":"

The size in bytes of the imported data. This field appears if the model version was imported.

" + } + } + }, + "DescribeResourcePolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource that is associated with the resource policy.

" + } + } + }, + "DescribeResourcePolicyResponse":{ + "type":"structure", + "members":{ + "PolicyRevisionId":{ + "shape":"PolicyRevisionId", + "documentation":"

A unique identifier for a revision of the resource policy.

" + }, + "ResourcePolicy":{ + "shape":"Policy", + "documentation":"

The resource policy in a JSON-formatted string.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The time when the resource policy was created.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time when the resource policy was last modified.

" } } }, @@ -1619,6 +1976,122 @@ "min":1, "pattern":"\\p{ASCII}{1,256}" }, + "ImportDatasetRequest":{ + "type":"structure", + "required":[ + "SourceDatasetArn", + "ClientToken" + ], + "members":{ + "SourceDatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset to import.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the machine learning dataset to be created. If the dataset already exists, Amazon Lookout for Equipment overwrites the existing dataset. If you don't specify this field, it is filled with the name of the source dataset.

" + }, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "idempotencyToken":true + }, + "ServerSideKmsKeyId":{ + "shape":"NameOrArn", + "documentation":"

Provides the identifier of the KMS key used to encrypt model data by Amazon Lookout for Equipment.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags associated with the dataset to be created.

" + } + } + }, + "ImportDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the created machine learning dataset.

" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset that was imported.

" + }, + "Status":{ + "shape":"DatasetStatus", + "documentation":"

The status of the ImportDataset operation.

" + }, + "JobId":{ + "shape":"IngestionJobId", + "documentation":"

A unique identifier for the job of importing the dataset.

" + } + } + }, + "ImportModelVersionRequest":{ + "type":"structure", + "required":[ + "SourceModelVersionArn", + "DatasetName", + "ClientToken" + ], + "members":{ + "SourceModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the model version to import.

" + }, + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name for the machine learning model to be created. If the model already exists, Amazon Lookout for Equipment creates a new version. If you do not specify this field, it is filled with the name of the source model.

" + }, + "DatasetName":{ + "shape":"DatasetIdentifier", + "documentation":"

The name of the dataset for the machine learning model being imported.

" + }, + "LabelsInputConfiguration":{"shape":"LabelsInputConfiguration"}, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "idempotencyToken":true + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source being used to create the machine learning model.

" + }, + "ServerSideKmsKeyId":{ + "shape":"NameOrArn", + "documentation":"

Provides the identifier of the KMS key used to encrypt model data by Amazon Lookout for Equipment.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags associated with the machine learning model to be created.

" + } + } + }, + "ImportModelVersionResponse":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name for the machine learning model.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the model being created.

" + }, + "ModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the model version being created.

" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model being created.

" + }, + "Status":{ + "shape":"ModelVersionStatus", + "documentation":"

The status of the ImportModelVersion operation.

" + } + } + }, "InferenceEventSummaries":{ "type":"list", "member":{"shape":"InferenceEventSummary"} @@ -1761,7 +2234,7 @@ }, "KmsKeyId":{ "shape":"NameOrArn", - "documentation":"

The ID number for the AWS KMS key used to encrypt the inference output.

" + "documentation":"

The ID number for the KMS key used to encrypt the inference output.

" } }, "documentation":"

Specifies configuration information for the output results from for the inference, including KMS key ID and output S3 location.

" @@ -1908,7 +2381,8 @@ "enum":[ "IN_PROGRESS", "SUCCESS", - "FAILED" + "FAILED", + "IMPORT_IN_PROGRESS" ] }, "IngestionS3InputConfiguration":{ @@ -1925,7 +2399,7 @@ }, "KeyPattern":{ "shape":"KeyPattern", - "documentation":"

Pattern for matching the Amazon S3 files which will be used for ingestion. If no KeyPattern is provided, we will use the default hierarchy file structure, which is same as KeyPattern {prefix}/{component_name}/*

" + "documentation":"

The pattern for matching the Amazon S3 files that will be used for ingestion. If the schema was created previously without any KeyPattern, then the default KeyPattern {prefix}/{component_name}/* is used to download files from Amazon S3 according to the schema. This field is required when ingestion is being done for the first time.

Valid Values: {prefix}/{component_name}_* | {prefix}/{component_name}/* | {prefix}/{component_name}[DELIMITER]* (Allowed delimiters : space, dot, underscore, hyphen)

" } }, "documentation":"

Specifies S3 configuration information for the input data for the data ingestion job.

" @@ -2018,7 +2492,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"

The ARN of the label group.

" + "documentation":"

The Amazon Resource Name (ARN) of the label group.

" }, "CreatedAt":{ "shape":"Timestamp", @@ -2061,7 +2535,7 @@ }, "LabelGroupArn":{ "shape":"LabelGroupArn", - "documentation":"

The ARN of the label group.

" + "documentation":"

The Amazon Resource Name (ARN) of the label group.

" }, "StartTime":{ "shape":"Timestamp", @@ -2235,7 +2709,7 @@ }, "IntervalEndTime":{ "shape":"Timestamp", - "documentation":"

Returns all the inference events with an end start time equal to or greater than less than the end time given

" + "documentation":"

Returns all the inference events with an end time equal to or less than the end time given.

" } } }, @@ -2316,7 +2790,7 @@ }, "Status":{ "shape":"InferenceSchedulerStatus", - "documentation":"

Specifies the current status of the inference schedulers to list.

" + "documentation":"

Specifies the current status of the inference schedulers.

" } } }, @@ -2410,6 +2884,61 @@ } } }, + "ListModelVersionsRequest":{ + "type":"structure", + "required":["ModelName"], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the machine learning model for which the model versions are to be listed.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results exceeds the limit that the response can display, the response returns an opaque pagination token indicating where to continue the listing of machine learning model versions. Use this token in the NextToken field in the request to list the next page of results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Specifies the maximum number of machine learning model versions to list.

" + }, + "Status":{ + "shape":"ModelVersionStatus", + "documentation":"

Filter the results based on the current status of the model version.

" + }, + "SourceType":{ + "shape":"ModelVersionSourceType", + "documentation":"

Filter the results based on the way the model version was generated.

" + }, + "CreatedAtEndTime":{ + "shape":"Timestamp", + "documentation":"

Filter results to return all the model versions created before this time.

" + }, + "CreatedAtStartTime":{ + "shape":"Timestamp", + "documentation":"

Filter results to return all the model versions created after this time.

" + }, + "MaxModelVersion":{ + "shape":"ModelVersion", + "documentation":"

Specifies the highest version of the model to return in the list.

" + }, + "MinModelVersion":{ + "shape":"ModelVersion", + "documentation":"

Specifies the lowest version of the model to return in the list.

" + } + } + }, + "ListModelVersionsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the total number of results exceeds the limit that the response can display, the response returns an opaque pagination token indicating where to continue the listing of machine learning model versions. Use this token in the NextToken field in the request to list the next page of results.

" + }, + "ModelVersionSummaries":{ + "shape":"ModelVersionSummaries", + "documentation":"

Provides information on the specified model version, including the created time, model and dataset ARNs, and status.

" + } + } + }, "ListModelsRequest":{ "type":"structure", "members":{ @@ -2563,7 +3092,8 @@ "enum":[ "IN_PROGRESS", "SUCCESS", - "FAILED" + "FAILED", + "IMPORT_IN_PROGRESS" ] }, "ModelSummaries":{ @@ -2596,10 +3126,84 @@ "CreatedAt":{ "shape":"Timestamp", "documentation":"

The time at which the specific model was created.

" + }, + "ActiveModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The model version that the inference scheduler uses to run an inference execution.

" + }, + "ActiveModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the model version that is set as active. The active model version is the model version that the inference scheduler uses to run an inference execution.

" } }, "documentation":"

Provides information about the specified ML model, including dataset and model names and ARNs, as well as status.

" }, + "ModelVersion":{ + "type":"long", + "min":1 + }, + "ModelVersionArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/.+\\/.+\\/model-version\\/[0-9]{1,}$" + }, + "ModelVersionSourceType":{ + "type":"string", + "enum":[ + "TRAINING", + "RETRAINING", + "IMPORT" + ] + }, + "ModelVersionStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCESS", + "FAILED", + "IMPORT_IN_PROGRESS", + "CANCELED" + ] + }, + "ModelVersionSummaries":{ + "type":"list", + "member":{"shape":"ModelVersionSummary"} + }, + "ModelVersionSummary":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the model that this model version is a version of.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the model that this model version is a version of.

" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the model.

" + }, + "ModelVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the model version.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The time when this model version was created.

" + }, + "Status":{ + "shape":"ModelVersionStatus", + "documentation":"

The current status of the model version.

" + }, + "SourceType":{ + "shape":"ModelVersionSourceType", + "documentation":"

Indicates how this model version was generated.

" + } + }, + "documentation":"

Contains information about the specific model version.

" + }, "MonotonicValues":{ "type":"structure", "required":["Status"], @@ -2650,6 +3254,63 @@ "max":2048, "min":1 }, + "Policy":{ + "type":"string", + "max":20000, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "PolicyRevisionId":{ + "type":"string", + "max":50, + "pattern":"[0-9A-Fa-f]+" + }, + "PutResourcePolicyRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "ResourcePolicy", + "ClientToken" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource for which the policy is being created.

" + }, + "ResourcePolicy":{ + "shape":"Policy", + "documentation":"

The JSON-formatted resource policy to create.

" + }, + "PolicyRevisionId":{ + "shape":"PolicyRevisionId", + "documentation":"

A unique identifier for a revision of the resource policy.

" + }, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "idempotencyToken":true + } + } + }, + "PutResourcePolicyResponse":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource for which the policy was created.

" + }, + "PolicyRevisionId":{ + "shape":"PolicyRevisionId", + "documentation":"

A unique identifier for a revision of the resource policy.

" + } + } + }, + "ResourceArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:.+" + }, "ResourceNotFoundException":{ "type":"structure", "required":["Message"], @@ -2684,7 +3345,7 @@ }, "Key":{ "shape":"S3Key", - "documentation":"

The AWS Key Management Service (AWS KMS) key being used to encrypt the S3 object. Without this key, data in the bucket is not accessible.

" + "documentation":"

The Amazon Web Services Key Management Service (KMS) key being used to encrypt the S3 object. Without this key, data in the bucket is not accessible.

" } }, "documentation":"

Contains information about an S3 bucket.

" @@ -3028,6 +3689,52 @@ "members":{ } }, + "UpdateActiveModelVersionRequest":{ + "type":"structure", + "required":[ + "ModelName", + "ModelVersion" + ], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the machine learning model for which the active model version is being set.

" + }, + "ModelVersion":{ + "shape":"ModelVersion", + "documentation":"

The version of the machine learning model for which the active model version is being set.

" + } + } + }, + "UpdateActiveModelVersionResponse":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the machine learning model for which the active model version was set.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the machine learning model for which the active model version was set.

" + }, + "CurrentActiveVersion":{ + "shape":"ModelVersion", + "documentation":"

The currently active version of the machine learning model for which the active model version was set.

" + }, + "PreviousActiveVersion":{ + "shape":"ModelVersion", + "documentation":"

The previously active version of the machine learning model for which the active model version was set.

" + }, + "CurrentActiveVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the machine learning model version that is the current active model version.

" + }, + "PreviousActiveVersionArn":{ + "shape":"ModelVersionArn", + "documentation":"

The Amazon Resource Name (ARN) of the machine learning model version that was the previous active model version.

" + } + } + }, "UpdateInferenceSchedulerRequest":{ "type":"structure", "required":["InferenceSchedulerName"], @@ -3078,7 +3785,7 @@ "members":{ "Message":{"shape":"BoundedLengthString"} }, - "documentation":"

The input fails to satisfy constraints specified by Amazon Lookout for Equipment or a related AWS service that's being utilized.

", + "documentation":"

The input fails to satisfy constraints specified by Amazon Lookout for Equipment or a related Amazon Web Services service that's being utilized.

", "exception":true } }, diff --git a/services/lookoutmetrics/pom.xml b/services/lookoutmetrics/pom.xml index cc2a92bf9a5..7eadcb29a09 100644 --- a/services/lookoutmetrics/pom.xml +++ b/services/lookoutmetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lookoutmetrics AWS Java SDK :: Services :: Lookout Metrics diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index 6a678dac656..44f0a16a00e 100644 --- a/services/lookoutvision/pom.xml +++ b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/m2/pom.xml b/services/m2/pom.xml index 4117064c9d1..4e595cf5fa0 100644 --- a/services/m2/pom.xml +++ b/services/m2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT m2 AWS Java SDK :: Services :: M2 diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index 7a350351302..e21c1a8a41d 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie/pom.xml b/services/macie/pom.xml index 56eeb57590d..dfab482f65d 100644 --- a/services/macie/pom.xml +++ b/services/macie/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT macie AWS Java SDK :: Services :: Macie diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml index 5f92e247026..91375a5dd68 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index 
aaf5dd43fef..1d235f59ab8 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/managedblockchainquery/pom.xml b/services/managedblockchainquery/pom.xml new file mode 100644 index 00000000000..b50a8479cf9 --- /dev/null +++ b/services/managedblockchainquery/pom.xml @@ -0,0 +1,60 @@ + + + + + 4.0.0 + + software.amazon.awssdk + services + 2.20.126-SNAPSHOT + + managedblockchainquery + AWS Java SDK :: Services :: Managed Blockchain Query + The AWS Java SDK for Managed Blockchain Query module holds the client classes that are used for + communicating with Managed Blockchain Query. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.managedblockchainquery + + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + diff --git a/services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 00000000000..1f94e43d857 --- /dev/null +++ b/services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. 
If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + 
{ + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://managedblockchain-query-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://managedblockchain-query-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://managedblockchain-query.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://managedblockchain-query.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-tests.json b/services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 00000000000..7287b49a660 --- /dev/null +++ b/services/managedblockchainquery/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 
with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": 
true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or 
both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://managedblockchain-query.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + 
"error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/managedblockchainquery/src/main/resources/codegen-resources/paginators-1.json b/services/managedblockchainquery/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 00000000000..c8f2452b78f --- /dev/null +++ b/services/managedblockchainquery/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListTokenBalances": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "tokenBalances" + }, + "ListTransactionEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "events" + }, + "ListTransactions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "transactions" + } + } +} diff --git a/services/managedblockchainquery/src/main/resources/codegen-resources/service-2.json b/services/managedblockchainquery/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 00000000000..aa6b9529c07 --- /dev/null +++ b/services/managedblockchainquery/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1018 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2023-05-04", + "endpointPrefix":"managedblockchain-query", + "jsonVersion":"1.1", + "protocol":"rest-json", + "ripServiceName":"chainquery", + "serviceFullName":"Amazon Managed Blockchain Query", + "serviceId":"ManagedBlockchain Query", + "signatureVersion":"v4", + "signingName":"managedblockchain-query", + "uid":"managedblockchain-query-2023-05-04" + }, + 
"operations":{ + "BatchGetTokenBalance":{ + "name":"BatchGetTokenBalance", + "http":{ + "method":"POST", + "requestUri":"/batch-get-token-balance", + "responseCode":200 + }, + "input":{"shape":"BatchGetTokenBalanceInput"}, + "output":{"shape":"BatchGetTokenBalanceOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Gets the token balance for a batch of tokens by using the GetTokenBalance action for every token in the request.

Only the native tokens BTC, ETH, and the ERC-20, ERC-721, and ERC-1155 token standards are supported.

" + }, + "GetTokenBalance":{ + "name":"GetTokenBalance", + "http":{ + "method":"POST", + "requestUri":"/get-token-balance", + "responseCode":200 + }, + "input":{"shape":"GetTokenBalanceInput"}, + "output":{"shape":"GetTokenBalanceOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Gets the balance of a specific token, including native tokens, for a given address (wallet or contract) on the blockchain.

Only the native tokens BTC, ETH, and the ERC-20, ERC-721, and ERC-1155 token standards are supported.

" + }, + "GetTransaction":{ + "name":"GetTransaction", + "http":{ + "method":"POST", + "requestUri":"/get-transaction", + "responseCode":200 + }, + "input":{"shape":"GetTransactionInput"}, + "output":{"shape":"GetTransactionOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Get the details of a transaction.

" + }, + "ListTokenBalances":{ + "name":"ListTokenBalances", + "http":{ + "method":"POST", + "requestUri":"/list-token-balances", + "responseCode":200 + }, + "input":{"shape":"ListTokenBalancesInput"}, + "output":{"shape":"ListTokenBalancesOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

This action returns the following for a given blockchain network:

  • Lists all token balances owned by an address (either a contract address or a wallet address).

  • Lists all token balances for all tokens created by a contract.

  • Lists all token balances for a given token.

You must always specify the network property of the tokenFilter when using this operation.

" + }, + "ListTransactionEvents":{ + "name":"ListTransactionEvents", + "http":{ + "method":"POST", + "requestUri":"/list-transaction-events", + "responseCode":200 + }, + "input":{"shape":"ListTransactionEventsInput"}, + "output":{"shape":"ListTransactionEventsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

An array of TransactionEvent objects. Each object contains details about the transaction event.

" + }, + "ListTransactions":{ + "name":"ListTransactions", + "http":{ + "method":"POST", + "requestUri":"/list-transactions", + "responseCode":200 + }, + "input":{"shape":"ListTransactionsInput"}, + "output":{"shape":"ListTransactionsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Lists all of the transactions on a given wallet address or to a specific contract.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"

The container for the exception message.

" + } + }, + "documentation":"

The Amazon Web Services account doesn’t have access to this resource.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "BatchGetTokenBalanceErrorItem":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "errorType" + ], + "members":{ + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "errorCode":{ + "shape":"String", + "documentation":"

The error code associated with the error.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

The message associated with the error.

" + }, + "errorType":{ + "shape":"ErrorType", + "documentation":"

The type of error.

" + } + }, + "documentation":"

Error generated from a failed BatchGetTokenBalance request.

" + }, + "BatchGetTokenBalanceErrors":{ + "type":"list", + "member":{"shape":"BatchGetTokenBalanceErrorItem"}, + "max":10, + "min":0 + }, + "BatchGetTokenBalanceInput":{ + "type":"structure", + "members":{ + "getTokenBalanceInputs":{ + "shape":"GetTokenBalanceInputList", + "documentation":"

An array of GetTokenBalanceInput objects whose balance is being requested.

" + } + } + }, + "BatchGetTokenBalanceInputItem":{ + "type":"structure", + "required":[ + "tokenIdentifier", + "ownerIdentifier" + ], + "members":{ + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "atBlockchainInstant":{"shape":"BlockchainInstant"} + }, + "documentation":"

The container for the input for getting a token balance.

" + }, + "BatchGetTokenBalanceOutput":{ + "type":"structure", + "required":[ + "tokenBalances", + "errors" + ], + "members":{ + "tokenBalances":{ + "shape":"BatchGetTokenBalanceOutputList", + "documentation":"

An array of BatchGetTokenBalanceOutputItem objects returned by the response.

" + }, + "errors":{ + "shape":"BatchGetTokenBalanceErrors", + "documentation":"

An array of BatchGetTokenBalanceErrorItem objects returned from the request.

" + } + } + }, + "BatchGetTokenBalanceOutputItem":{ + "type":"structure", + "required":[ + "balance", + "atBlockchainInstant" + ], + "members":{ + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "balance":{ + "shape":"String", + "documentation":"

The container for the token balance.

" + }, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "lastUpdatedTime":{"shape":"BlockchainInstant"} + }, + "documentation":"

The container for the properties of a token balance output.

" + }, + "BatchGetTokenBalanceOutputList":{ + "type":"list", + "member":{"shape":"BatchGetTokenBalanceOutputItem"}, + "max":10, + "min":0 + }, + "BlockHash":{ + "type":"string", + "pattern":"(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" + }, + "BlockchainInstant":{ + "type":"structure", + "members":{ + "time":{ + "shape":"Timestamp", + "documentation":"

The container of the Timestamp of the blockchain instant.

This timestamp will only be recorded up to the second.

" + } + }, + "documentation":"

The container for time.

" + }, + "ChainAddress":{ + "type":"string", + "pattern":"[-A-Za-z0-9]{13,74}" + }, + "ErrorType":{ + "type":"string", + "enum":[ + "VALIDATION_EXCEPTION", + "RESOURCE_NOT_FOUND_EXCEPTION" + ] + }, + "ExceptionMessage":{ + "type":"string", + "min":1 + }, + "GetTokenBalanceInput":{ + "type":"structure", + "required":[ + "tokenIdentifier", + "ownerIdentifier" + ], + "members":{ + "tokenIdentifier":{ + "shape":"TokenIdentifier", + "documentation":"

The container for the identifier for the token, including the unique token ID and its blockchain network.

" + }, + "ownerIdentifier":{ + "shape":"OwnerIdentifier", + "documentation":"

The container for the identifier for the owner.

" + }, + "atBlockchainInstant":{ + "shape":"BlockchainInstant", + "documentation":"

The time for when the TokenBalance is requested or the current time if a time is not provided in the request.

This time will only be recorded up to the second.

" + } + } + }, + "GetTokenBalanceInputList":{ + "type":"list", + "member":{"shape":"BatchGetTokenBalanceInputItem"}, + "max":10, + "min":1 + }, + "GetTokenBalanceOutput":{ + "type":"structure", + "required":[ + "balance", + "atBlockchainInstant" + ], + "members":{ + "ownerIdentifier":{"shape":"OwnerIdentifier"}, + "tokenIdentifier":{"shape":"TokenIdentifier"}, + "balance":{ + "shape":"String", + "documentation":"

The container for the token balance.

" + }, + "atBlockchainInstant":{"shape":"BlockchainInstant"}, + "lastUpdatedTime":{"shape":"BlockchainInstant"} + } + }, + "GetTransactionInput":{ + "type":"structure", + "required":[ + "transactionHash", + "network" + ], + "members":{ + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + }, + "network":{ + "shape":"QueryNetwork", + "documentation":"

The blockchain network where the transaction occurred.

" + } + } + }, + "GetTransactionOutput":{ + "type":"structure", + "required":["transaction"], + "members":{ + "transaction":{ + "shape":"Transaction", + "documentation":"

Contains the details of the transaction.

" + } + } + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"

The container for the exception message.

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

The container of the retryAfterSeconds value.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

The request processing has failed because of an internal error in the service.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListTokenBalancesInput":{ + "type":"structure", + "required":["tokenFilter"], + "members":{ + "ownerFilter":{ + "shape":"OwnerFilter", + "documentation":"

The contract or wallet address on the blockchain network by which to filter the request. You must specify the address property of the ownerFilter when listing balances of tokens owned by the address.

" + }, + "tokenFilter":{ + "shape":"TokenFilter", + "documentation":"

The contract address or a token identifier on the blockchain network by which to filter the request. You must specify the contractAddress property of this container when listing tokens minted by a contract.

You must always specify the network property of this container when using this operation.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + }, + "maxResults":{ + "shape":"ListTokenBalancesInputMaxResultsInteger", + "documentation":"

The maximum number of token balances to return.

" + } + } + }, + "ListTokenBalancesInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":250, + "min":1 + }, + "ListTokenBalancesOutput":{ + "type":"structure", + "required":["tokenBalances"], + "members":{ + "tokenBalances":{ + "shape":"TokenBalanceList", + "documentation":"

An array of TokenBalance objects. Each object contains details about the token balance.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + } + } + }, + "ListTransactionEventsInput":{ + "type":"structure", + "required":[ + "transactionHash", + "network" + ], + "members":{ + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + }, + "network":{ + "shape":"QueryNetwork", + "documentation":"

The blockchain network where the transaction events occurred.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + }, + "maxResults":{ + "shape":"ListTransactionEventsInputMaxResultsInteger", + "documentation":"

The maximum number of transaction events to list.

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return.

" + } + } + }, + "ListTransactionEventsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":250, + "min":1 + }, + "ListTransactionEventsOutput":{ + "type":"structure", + "required":["events"], + "members":{ + "events":{ + "shape":"TransactionEventList", + "documentation":"

An array of TransactionEvent objects. Each object contains details about the transaction events.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + } + } + }, + "ListTransactionsInput":{ + "type":"structure", + "required":[ + "address", + "network" + ], + "members":{ + "address":{ + "shape":"ChainAddress", + "documentation":"

The address (either a contract or wallet), whose transactions are being requested.

" + }, + "network":{ + "shape":"QueryNetwork", + "documentation":"

The blockchain network where the transactions occurred.

" + }, + "fromBlockchainInstant":{"shape":"BlockchainInstant"}, + "toBlockchainInstant":{"shape":"BlockchainInstant"}, + "sort":{ + "shape":"ListTransactionsSort", + "documentation":"

Sorts items in an ascending order if the first page starts at fromTime. Sorts items in a descending order if the first page starts at toTime.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + }, + "maxResults":{ + "shape":"ListTransactionsInputMaxResultsInteger", + "documentation":"

The maximum number of transactions to list.

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return.

" + } + } + }, + "ListTransactionsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":250, + "min":1 + }, + "ListTransactionsOutput":{ + "type":"structure", + "required":["transactions"], + "members":{ + "transactions":{ + "shape":"TransactionOutputList", + "documentation":"

The array of transactions returned by the request.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token that indicates the next set of results to retrieve.

" + } + } + }, + "ListTransactionsSort":{ + "type":"structure", + "members":{ + "sortBy":{ + "shape":"ListTransactionsSortBy", + "documentation":"

Defaults to the value TRANSACTION_TIMESTAMP.

" + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"

The container for the sort order for ListTransactions. The SortOrder field only accepts the values ASCENDING and DESCENDING. Not providing SortOrder will default to ASCENDING.

" + } + }, + "documentation":"

The container for determining how the list transaction result will be sorted.

" + }, + "ListTransactionsSortBy":{ + "type":"string", + "enum":["TRANSACTION_TIMESTAMP"] + }, + "Long":{ + "type":"long", + "box":true + }, + "NextToken":{ + "type":"string", + "max":131070, + "min":0 + }, + "OwnerFilter":{ + "type":"structure", + "required":["address"], + "members":{ + "address":{ + "shape":"ChainAddress", + "documentation":"

The contract or wallet address.

" + } + }, + "documentation":"

The container for the owner information to filter by.

" + }, + "OwnerIdentifier":{ + "type":"structure", + "required":["address"], + "members":{ + "address":{ + "shape":"ChainAddress", + "documentation":"

The contract or wallet address for the owner.

" + } + }, + "documentation":"

The container for the identifier of the owner.

" + }, + "QueryNetwork":{ + "type":"string", + "enum":[ + "ETHEREUM_MAINNET", + "BITCOIN_MAINNET" + ] + }, + "QueryTokenId":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{1,66}" + }, + "QueryTransactionEventType":{ + "type":"string", + "enum":[ + "ERC20_TRANSFER", + "ERC20_MINT", + "ERC20_BURN", + "ERC20_DEPOSIT", + "ERC20_WITHDRAWAL", + "ERC721_TRANSFER", + "ERC1155_TRANSFER", + "BITCOIN_VIN", + "BITCOIN_VOUT", + "INTERNAL_ETH_TRANSFER", + "ETH_TRANSFER" + ] + }, + "QueryTransactionHash":{ + "type":"string", + "pattern":"(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" + }, + "QueryTransactionStatus":{ + "type":"string", + "enum":[ + "FINAL", + "FAILED" + ] + }, + "QuotaCode":{"type":"string"}, + "ResourceId":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"

The container for the exception message.

" + }, + "resourceId":{ + "shape":"ResourceId", + "documentation":"

The resourceId of the resource that caused the exception.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The resourceType of the resource that caused the exception.

" + } + }, + "documentation":"

The resource was not found.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":["collection"] + }, + "ServiceCode":{"type":"string"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"

The container for the exception message.

" + }, + "resourceId":{ + "shape":"ResourceId", + "documentation":"

The resourceId of the resource that caused the exception.

" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"

The resourceType of the resource that caused the exception.

" + }, + "serviceCode":{ + "shape":"ServiceCode", + "documentation":"

The container for the serviceCode.

" + }, + "quotaCode":{ + "shape":"QuotaCode", + "documentation":"

The container for the quotaCode.

" + } + }, + "documentation":"

The service quota has been exceeded for this resource.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "required":[ + "message", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"

The container for the exception message.

" + }, + "serviceCode":{ + "shape":"ServiceCode", + "documentation":"

The container for the serviceCode.

" + }, + "quotaCode":{ + "shape":"QuotaCode", + "documentation":"

The container for the quotaCode.

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

The container of the retryAfterSeconds value.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

The request or operation couldn't be performed because a service is throttling requests. The most common source of throttling errors is when you create resources that exceed your service limit for this resource type. Request a limit increase or delete unused resources, if possible.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "TokenBalance":{ + "type":"structure", + "required":[ + "balance", + "atBlockchainInstant" + ], + "members":{ + "ownerIdentifier":{ + "shape":"OwnerIdentifier", + "documentation":"

The container for the identifier of the owner.

" + }, + "tokenIdentifier":{ + "shape":"TokenIdentifier", + "documentation":"

The identifier for the token, including the unique token ID and its blockchain network.

" + }, + "balance":{ + "shape":"String", + "documentation":"

The container of the token balance.

" + }, + "atBlockchainInstant":{ + "shape":"BlockchainInstant", + "documentation":"

The time for when the TokenBalance is requested or the current time if a time is not provided in the request.

This time will only be recorded up to the second.

" + }, + "lastUpdatedTime":{ + "shape":"BlockchainInstant", + "documentation":"

The timestamp of the last transaction at which the balance for the token in the wallet was updated.

" + } + }, + "documentation":"

The balance of the token.

" + }, + "TokenBalanceList":{ + "type":"list", + "member":{"shape":"TokenBalance"}, + "max":250, + "min":0 + }, + "TokenFilter":{ + "type":"structure", + "required":["network"], + "members":{ + "network":{ + "shape":"QueryNetwork", + "documentation":"

The blockchain network of the token.

" + }, + "contractAddress":{ + "shape":"ChainAddress", + "documentation":"

This is the address of the contract.

" + }, + "tokenId":{ + "shape":"QueryTokenId", + "documentation":"

The unique identifier of the token.

" + } + }, + "documentation":"

The container of the token filter like the contract address on a given blockchain network or a unique token identifier on a given blockchain network.

You must always specify the network property of this container when using this operation.

" + }, + "TokenIdentifier":{ + "type":"structure", + "required":["network"], + "members":{ + "network":{ + "shape":"QueryNetwork", + "documentation":"

The blockchain network of the token.

" + }, + "contractAddress":{ + "shape":"ChainAddress", + "documentation":"

This is the token's contract address.

" + }, + "tokenId":{ + "shape":"QueryTokenId", + "documentation":"

The unique identifier of the token.

" + } + }, + "documentation":"

The container for the identifier for the token including the unique token ID and its blockchain network.

Only the native tokens BTC, ETH, and the ERC-20, ERC-721, and ERC-1155 token standards are supported.

" + }, + "Transaction":{ + "type":"structure", + "required":[ + "network", + "transactionHash", + "transactionTimestamp", + "transactionIndex", + "numberOfTransactions", + "status", + "to" + ], + "members":{ + "network":{ + "shape":"QueryNetwork", + "documentation":"

The blockchain network where the transaction occurred.

" + }, + "blockHash":{ + "shape":"BlockHash", + "documentation":"

The block hash is a unique identifier for a block. It is a fixed-size string that is calculated by using the information in the block. The block hash is used to verify the integrity of the data in the block.

" + }, + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + }, + "blockNumber":{ + "shape":"String", + "documentation":"

The block number in which the transaction is recorded.

" + }, + "transactionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The Timestamp of the transaction.

" + }, + "transactionIndex":{ + "shape":"Long", + "documentation":"

The index of the transaction within a blockchain.

" + }, + "numberOfTransactions":{ + "shape":"Long", + "documentation":"

The number of transactions in the block.

" + }, + "status":{ + "shape":"QueryTransactionStatus", + "documentation":"

The status of the transaction.

" + }, + "to":{ + "shape":"ChainAddress", + "documentation":"

The identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + }, + "from":{ + "shape":"ChainAddress", + "documentation":"

The initiator of the transaction. It is either in the form a public key or a contract address.

" + }, + "contractAddress":{ + "shape":"ChainAddress", + "documentation":"

The blockchain address for the contract.

" + }, + "gasUsed":{ + "shape":"String", + "documentation":"

The amount of gas used for the transaction.

" + }, + "cumulativeGasUsed":{ + "shape":"String", + "documentation":"

The amount of gas used up to the specified point in the block.

" + }, + "effectiveGasPrice":{ + "shape":"String", + "documentation":"

The effective gas price.

" + }, + "signatureV":{ + "shape":"Integer", + "documentation":"

The signature of the transaction. The Z coordinate of a point V.

" + }, + "signatureR":{ + "shape":"String", + "documentation":"

The signature of the transaction. The X coordinate of a point R.

" + }, + "signatureS":{ + "shape":"String", + "documentation":"

The signature of the transaction. The Y coordinate of a point S.

" + }, + "transactionFee":{ + "shape":"String", + "documentation":"

The transaction fee.

" + }, + "transactionId":{ + "shape":"String", + "documentation":"

The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + } + }, + "documentation":"

There are two possible types of transactions used for this data type:

  • A Bitcoin transaction is a movement of BTC from one address to another.

  • An Ethereum transaction refers to an action initiated by an externally owned account, which is an account managed by a human, not a contract. For example, if Bob sends Alice 1 ETH, Bob's account must be debited and Alice's must be credited. This state-changing action occurs within a transaction.

" + }, + "TransactionEvent":{ + "type":"structure", + "required":[ + "network", + "transactionHash", + "eventType" + ], + "members":{ + "network":{ + "shape":"QueryNetwork", + "documentation":"

The blockchain network where the transaction occurred.

" + }, + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + }, + "eventType":{ + "shape":"QueryTransactionEventType", + "documentation":"

The type of transaction event.

" + }, + "from":{ + "shape":"ChainAddress", + "documentation":"

The wallet address initiating the transaction. It can either be a public key or a contract.

" + }, + "to":{ + "shape":"ChainAddress", + "documentation":"

The wallet address receiving the transaction. It can either be a public key or a contract.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value that was transacted.

" + }, + "contractAddress":{ + "shape":"ChainAddress", + "documentation":"

The blockchain address for the contract.

" + }, + "tokenId":{ + "shape":"QueryTokenId", + "documentation":"

The unique identifier for the token involved in the transaction.

" + }, + "transactionId":{ + "shape":"String", + "documentation":"

The unique identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + }, + "voutIndex":{ + "shape":"Integer", + "documentation":"

The position of the vout in the transaction output list.

" + } + }, + "documentation":"

The container for the properties of a transaction event.

" + }, + "TransactionEventList":{ + "type":"list", + "member":{"shape":"TransactionEvent"}, + "max":250, + "min":0 + }, + "TransactionOutputItem":{ + "type":"structure", + "required":[ + "transactionHash", + "network", + "transactionTimestamp" + ], + "members":{ + "transactionHash":{ + "shape":"QueryTransactionHash", + "documentation":"

The hash of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" + }, + "network":{ + "shape":"QueryNetwork", + "documentation":"

The blockchain network where the transaction occurred.

" + }, + "transactionTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time when the transaction occurred.

" + } + }, + "documentation":"

The container of the transaction output.

" + }, + "TransactionOutputList":{ + "type":"list", + "member":{"shape":"TransactionOutputItem"}, + "max":250, + "min":0 + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"

The container for the exception message.

" + }, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

The container for the reason for the exception.

" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

The container for the fieldList of the exception.

" + } + }, + "documentation":"

The resource passed is invalid.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the field that triggered the ValidationException.

" + }, + "message":{ + "shape":"String", + "documentation":"

The ValidationException message.

" + } + }, + "documentation":"

The resource passed is invalid.

" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + } + }, + "documentation":"

Amazon Managed Blockchain (AMB) Query provides you with convenient access to multi-blockchain network data, which makes it easier for you to extract contextual data related to blockchain activity. You can use AMB Query to read data from public blockchain networks, such as Bitcoin Mainnet and Ethereum Mainnet. You can also get information such as the current and historical balances of addresses, or you can get a list of blockchain transactions for a given time period. Additionally, you can get details of a given transaction, such as transaction events, which you can further analyze or use in business logic for your applications.

" +} diff --git a/services/managedblockchainquery/src/main/resources/codegen-resources/waiters-2.json b/services/managedblockchainquery/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 00000000000..13f60ee66be --- /dev/null +++ b/services/managedblockchainquery/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index c4be7dac003..56bf88cc674 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index a137dc77b48..0a5d01b6bb2 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index 4d82ce7955c..20b908726c8 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index 53e40936bf0..8711e7594cc 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git 
a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index 31e0c926e23..b4ebc5183db 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index 8db63780f75..e3cfe40b7a4 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index 0b7a288c566..c6ee5fd641d 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -1217,7 +1217,7 @@ "Bitrate": { "shape": "__integerMin6000Max1024000", "locationName": "bitrate", - "documentation": "Specify the average bitrate in bits per second. The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. The value you set is also constrained by the values that you choose for Profile (codecProfile), Bitrate control mode (codingMode), and Sample rate (sampleRate). Default values depend on Bitrate control mode and Profile." + "documentation": "Specify the average bitrate in bits per second. The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. 
The value you set is also constrained by the values that you choose for Profile, Bitrate control mode, and Sample rate. Default values depend on Bitrate control mode and Profile." }, "CodecProfile": { "shape": "AacCodecProfile", @@ -1255,7 +1255,7 @@ "documentation": "VBR Quality Level - Only used if rate_control_mode is VBR." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode (rateControlMode) to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). Defaults and valid values depend on the rate control mode." + "documentation": "Required when you set Codec to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality. In CBR mode, you use the setting Bitrate. Defaults and valid values depend on the rate control mode." }, "AacSpecification": { "type": "string", @@ -1301,7 +1301,7 @@ }, "Ac3DynamicRangeCompressionLine": { "type": "string", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). 
For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "FILM_STANDARD", "FILM_LIGHT", @@ -1313,7 +1313,7 @@ }, "Ac3DynamicRangeCompressionProfile": { "type": "string", - "documentation": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile (DynamicRangeCompressionProfile). The mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine) and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf). Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None (NONE) to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD) to set the profile to Dolby's film standard profile for all operating modes.", + "documentation": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile. 
The mode-specific settings are Dynamic range compression profile, line mode and Dynamic range compression profile, RF mode. Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None to leave out DRC signaling. Keep the default Film standard to set the profile to Dolby's film standard profile for all operating modes.", "enum": [ "FILM_STANDARD", "NONE" @@ -1321,7 +1321,7 @@ }, "Ac3DynamicRangeCompressionRf": { "type": "string", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. 
For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "FILM_STANDARD", "FILM_LIGHT", @@ -1373,17 +1373,17 @@ "DynamicRangeCompressionLine": { "shape": "Ac3DynamicRangeCompressionLine", "locationName": "dynamicRangeCompressionLine", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "DynamicRangeCompressionProfile": { "shape": "Ac3DynamicRangeCompressionProfile", "locationName": "dynamicRangeCompressionProfile", - "documentation": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile (DynamicRangeCompressionProfile). 
The mode-specific settings are Dynamic range compression profile, line mode (dynamicRangeCompressionLine) and Dynamic range compression profile, RF mode (dynamicRangeCompressionRf). Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None (NONE) to leave out DRC signaling. Keep the default Film standard (FILM_STANDARD) to set the profile to Dolby's film standard profile for all operating modes." + "documentation": "When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile. The mode-specific settings are Dynamic range compression profile, line mode and Dynamic range compression profile, RF mode. Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None to leave out DRC signaling. Keep the default Film standard to set the profile to Dolby's film standard profile for all operating modes." }, "DynamicRangeCompressionRf": { "shape": "Ac3DynamicRangeCompressionRf", "locationName": "dynamicRangeCompressionRf", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." 
+ "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "LfeFilter": { "shape": "Ac3LfeFilter", @@ -1401,7 +1401,7 @@ "documentation": "This value is always 48000. It represents the sample rate in Hz." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AC3." + "documentation": "Required when you set Codec to the value AC3." }, "AccelerationMode": { "type": "string", @@ -1479,7 +1479,7 @@ }, "AfdSignaling": { "type": "string", - "documentation": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data.", + "documentation": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. 
* Choose Auto to calculate output AFD values based on the input AFD scaler data.", "enum": [ "NONE", "AUTO", @@ -1492,7 +1492,7 @@ "BitDepth": { "shape": "__integerMin16Max24", "locationName": "bitDepth", - "documentation": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." + "documentation": "Specify Bit depth, in bits per sample, to choose the encoding quality for this audio track." }, "Channels": { "shape": "__integerMin1Max64", @@ -1505,7 +1505,7 @@ "documentation": "Sample rate in hz." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AIFF." + "documentation": "Required when you set Codec to the value AIFF." }, "AllowedRenditionSize": { "type": "structure", @@ -1538,7 +1538,7 @@ }, "AncillaryConvert608To708": { "type": "string", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "enum": [ "UPCONVERT", "DISABLED" @@ -1550,7 +1550,7 @@ "Convert608To708": { "shape": "AncillaryConvert608To708", "locationName": "convert608To708", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. 
If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." }, "SourceAncillaryChannelNumber": { "shape": "__integerMin1Max4", @@ -1633,7 +1633,7 @@ }, "AudioCodec": { "type": "string", - "documentation": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output", + "documentation": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. 
For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output", "enum": [ "AAC", "MP2", @@ -1654,37 +1654,37 @@ "AacSettings": { "shape": "AacSettings", "locationName": "aacSettings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode (rateControlMode) to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality (vbrQuality). In CBR mode, you use the setting Bitrate (bitrate). Defaults and valid values depend on the rate control mode." + "documentation": "Required when you set Codec to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality. In CBR mode, you use the setting Bitrate. Defaults and valid values depend on the rate control mode." }, "Ac3Settings": { "shape": "Ac3Settings", "locationName": "ac3Settings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AC3." + "documentation": "Required when you set Codec to the value AC3." }, "AiffSettings": { "shape": "AiffSettings", "locationName": "aiffSettings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value AIFF." + "documentation": "Required when you set Codec to the value AIFF." }, "Codec": { "shape": "AudioCodec", "locationName": "codec", - "documentation": "Choose the audio codec for this output. 
Note that the option Dolby Digital passthrough (PASSTHROUGH) applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output" + "documentation": "Choose the audio codec for this output. Note that the option Dolby Digital passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output" }, "Eac3AtmosSettings": { "shape": "Eac3AtmosSettings", "locationName": "eac3AtmosSettings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3_ATMOS." + "documentation": "Required when you set Codec to the value EAC3_ATMOS." }, "Eac3Settings": { "shape": "Eac3Settings", "locationName": "eac3Settings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3." 
+ "documentation": "Required when you set Codec to the value EAC3." }, "Mp2Settings": { "shape": "Mp2Settings", "locationName": "mp2Settings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value MP2." + "documentation": "Required when you set Codec to the value MP2." }, "Mp3Settings": { "shape": "Mp3Settings", @@ -1704,7 +1704,7 @@ "WavSettings": { "shape": "WavSettings", "locationName": "wavSettings", - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value WAV." + "documentation": "Required when you set Codec to the value WAV." } }, "documentation": "Settings related to audio encoding. The settings in this group vary depending on the value that you choose for your audio codec." @@ -1753,7 +1753,7 @@ "CustomLanguageCode": { "shape": "__stringPatternAZaZ23AZaZ", "locationName": "customLanguageCode", - "documentation": "Specify the language for this audio output track. The service puts this language code into your output audio track when you set Language code control (AudioLanguageCodeControl) to Use configured (USE_CONFIGURED). The service also uses your specified custom language code when you set Language code control (AudioLanguageCodeControl) to Follow input (FOLLOW_INPUT), but your input file doesn't specify a language code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming." + "documentation": "Specify the language for this audio output track. The service puts this language code into your output audio track when you set Language code control to Use configured. The service also uses your specified custom language code when you set Language code control to Follow input, but your input file doesn't specify a language code. 
For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming." }, "LanguageCode": { "shape": "LanguageCode", @@ -1763,7 +1763,7 @@ "LanguageCodeControl": { "shape": "AudioLanguageCodeControl", "locationName": "languageCodeControl", - "documentation": "Specify which source for language code takes precedence for this audio track. When you choose Follow input (FOLLOW_INPUT), the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code (languageCode or customLanguageCode). When you choose Use configured (USE_CONFIGURED), the service uses the language code that you specify." + "documentation": "Specify which source for language code takes precedence for this audio track. When you choose Follow input, the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code. When you choose Use configured, the service uses the language code that you specify." }, "RemixSettings": { "shape": "RemixSettings", @@ -1790,7 +1790,7 @@ }, "AudioLanguageCodeControl": { "type": "string", - "documentation": "Specify which source for language code takes precedence for this audio track. When you choose Follow input (FOLLOW_INPUT), the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code (languageCode or customLanguageCode). 
When you choose Use configured (USE_CONFIGURED), the service uses the language code that you specify.", + "documentation": "Specify which source for language code takes precedence for this audio track. When you choose Follow input, the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code. When you choose Use configured, the service uses the language code that you specify.", "enum": [ "FOLLOW_INPUT", "USE_CONFIGURED" @@ -1861,7 +1861,7 @@ "TargetLkfs": { "shape": "__doubleMinNegative59Max0", "locationName": "targetLkfs", - "documentation": "When you use Audio normalization (AudioNormalizationSettings), optionally use this setting to specify a target loudness. If you don't specify a value here, the encoder chooses a value for you, based on the algorithm that you choose for Algorithm (algorithm). If you choose algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS." + "documentation": "When you use Audio normalization, optionally use this setting to specify a target loudness. If you don't specify a value here, the encoder chooses a value for you, based on the algorithm that you choose for Algorithm. If you choose algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS." }, "TruePeakLimiterThreshold": { "shape": "__doubleMinNegative8Max0", @@ -1917,7 +1917,7 @@ "ProgramSelection": { "shape": "__integerMin0Max8", "locationName": "programSelection", - "documentation": "Use this setting for input streams that contain Dolby E, to have the service extract specific program data from the track. To select multiple programs, create multiple selectors with the same Track and different Program numbers. In the console, this setting is visible when you set Selector type to Track. Choose the program number from the dropdown list. 
If you are sending a JSON file, provide the program ID, which is part of the audio metadata. If your input file has incorrect metadata, you can choose All channels instead of a program number to have the service ignore the program IDs and include all the programs in the track." + "documentation": "Use this setting for input streams that contain Dolby E, to have the service extract specific program data from the track. To select multiple programs, create multiple selectors with the same Track and different Program numbers. In the console, this setting is visible when you set Selector type to Track. Choose the program number from the dropdown list. If your input file has incorrect metadata, you can choose All channels instead of a program number to have the service ignore the program IDs and include all the programs in the track." }, "RemixSettings": { "shape": "RemixSettings", @@ -1932,10 +1932,10 @@ "Tracks": { "shape": "__listOf__integerMin1Max2147483647", "locationName": "tracks", - "documentation": "Identify a track from the input audio to include in this selector by entering the track index number. To include several tracks in a single audio selector, specify multiple tracks as follows. Using the console, enter a comma-separated list. For examle, type \"1,2,3\" to include tracks 1 through 3. Specifying directly in your JSON job file, provide the track numbers in an array. For example, \"tracks\": [1,2,3]." + "documentation": "Identify a track from the input audio to include in this selector by entering the track index number. To include several tracks in a single audio selector, specify multiple tracks as follows. Using the console, enter a comma-separated list. For example, type \"1,2,3\" to include tracks 1 through 3." } }, - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." 
+ "documentation": "Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." }, "AudioSelectorGroup": { "type": "structure", @@ -1946,7 +1946,7 @@ "documentation": "Name of an Audio Selector within the same input to include in the group. Audio selector names are standardized, based on their order within the input (e.g., \"Audio Selector 1\"). The audio selector name parameter can be repeated to add any number of audio selectors to the group." } }, - "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab (AudioDescription). Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." + "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." }, "AudioSelectorType": { "type": "string", @@ -2036,7 +2036,7 @@ }, "Av1AdaptiveQuantization": { "type": "string", - "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization).", + "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization.", "enum": [ "OFF", "LOW", @@ -2048,7 +2048,7 @@ }, "Av1BitDepth": { "type": "string", - "documentation": "Specify the Bit depth (Av1BitDepth). You can choose 8-bit (BIT_8) or 10-bit (BIT_10).", + "documentation": "Specify the Bit depth. 
You can choose 8-bit or 10-bit.", "enum": [ "BIT_8", "BIT_10" @@ -2056,7 +2056,7 @@ }, "Av1FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -2077,7 +2077,7 @@ "QvbrQualityLevel": { "shape": "__integerMin1Max10", "locationName": "qvbrQualityLevel", - "documentation": "Use this setting only when you set Rate control mode (RateControlMode) to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. 
If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." + "documentation": "Use this setting only when you set Rate control mode to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." }, "QvbrQualityLevelFineTune": { "shape": "__doubleMin0Max1", @@ -2085,7 +2085,7 @@ "documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33." 
} }, - "documentation": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the AV1 codec. Use these settings only when you set QVBR for Rate control mode." }, "Av1RateControlMode": { "type": "string", @@ -2100,17 +2100,17 @@ "AdaptiveQuantization": { "shape": "Av1AdaptiveQuantization", "locationName": "adaptiveQuantization", - "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization)." + "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to Spatial adaptive quantization." }, "BitDepth": { "shape": "Av1BitDepth", "locationName": "bitDepth", - "documentation": "Specify the Bit depth (Av1BitDepth). You can choose 8-bit (BIT_8) or 10-bit (BIT_10)." + "documentation": "Specify the Bit depth. You can choose 8-bit or 10-bit." }, "FramerateControl": { "shape": "Av1FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. 
Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "Av1FramerateConversionAlgorithm", @@ -2145,7 +2145,7 @@ "QvbrSettings": { "shape": "Av1QvbrSettings", "locationName": "qvbrSettings", - "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." }, "RateControlMode": { "shape": "Av1RateControlMode", @@ -2160,14 +2160,14 @@ "SpatialAdaptiveQuantization": { "shape": "Av1SpatialAdaptiveQuantization", "locationName": "spatialAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. 
If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." } }, "documentation": "Required when you set Codec, under VideoDescription>CodecSettings to the value AV1." }, "Av1SpatialAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. 
When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. 
For content with a wider variety of textures, set it to High or Higher.", "enum": [ "DISABLED", "ENABLED" @@ -2196,7 +2196,7 @@ }, "AvcIntraFramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -2213,7 +2213,7 @@ }, "AvcIntraInterlaceMode": { "type": "string", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. 
Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -2224,7 +2224,7 @@ }, "AvcIntraScanTypeConversionMode": { "type": "string", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. 
With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "enum": [ "INTERLACED", "INTERLACED_OPTIMIZE" @@ -2241,12 +2241,12 @@ "AvcIntraUhdSettings": { "shape": "AvcIntraUhdSettings", "locationName": "avcIntraUhdSettings", - "documentation": "Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object isn't allowed." + "documentation": "Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra class to a different value, this object isn't allowed." 
}, "FramerateControl": { "shape": "AvcIntraFramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "AvcIntraFramerateConversionAlgorithm", @@ -2266,29 +2266,29 @@ "InterlaceMode": { "shape": "AvcIntraInterlaceMode", "locationName": "interlaceMode", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. 
Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." }, "ScanTypeConversionMode": { "shape": "AvcIntraScanTypeConversionMode", "locationName": "scanTypeConversionMode", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. 
With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." }, "SlowPal": { "shape": "AvcIntraSlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. 
In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." }, "Telecine": { "shape": "AvcIntraTelecine", "locationName": "telecine", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." } }, "documentation": "Required when you choose AVC-Intra for your output video codec. For more information about the AVC-Intra settings, see the relevant specification. For detailed information about SD and HD in AVC-Intra, see https://ieeexplore.ieee.org/document/7290936. For information about 4K/2K in AVC-Intra, see https://pro-av.panasonic.net/en/avc-ultra/AVC-ULTRAoverview.pdf." }, "AvcIntraSlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). 
Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -2296,7 +2296,7 @@ }, "AvcIntraTelecine": { "type": "string", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "enum": [ "NONE", "HARD" @@ -2304,7 +2304,7 @@ }, "AvcIntraUhdQualityTuningLevel": { "type": "string", - "documentation": "Optional. 
Use Quality tuning level (qualityTuningLevel) to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass (MULTI_PASS), your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS).", + "documentation": "Optional. Use Quality tuning level to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass, your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass, your encoding time is faster. The default behavior is Single-pass.", "enum": [ "SINGLE_PASS", "MULTI_PASS" @@ -2316,10 +2316,10 @@ "QualityTuningLevel": { "shape": "AvcIntraUhdQualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass (MULTI_PASS), your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass (SINGLE_PASS), your encoding time is faster. The default behavior is Single-pass (SINGLE_PASS)." + "documentation": "Optional. Use Quality tuning level to choose how many transcoding passes MediaConvert does with your video. When you choose Multi-pass, your video quality is better and your output bitrate is more accurate. That is, the actual bitrate of your output is closer to the target bitrate defined in the specification. When you choose Single-pass, your encoding time is faster. The default behavior is Single-pass." 
} }, - "documentation": "Optional when you set AVC-Intra class (avcIntraClass) to Class 4K/2K (CLASS_4K_2K). When you set AVC-Intra class to a different value, this object isn't allowed." + "documentation": "Optional when you set AVC-Intra class to Class 4K/2K. When you set AVC-Intra class to a different value, this object isn't allowed." }, "BadRequestException": { "type": "structure", @@ -2384,7 +2384,7 @@ }, "BurnInSubtitleStylePassthrough": { "type": "string", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. 
Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", "enum": [ "ENABLED", "DISABLED" @@ -2401,27 +2401,27 @@ "ApplyFontColor": { "shape": "BurninSubtitleApplyFontColor", "locationName": "applyFontColor", - "documentation": "Ignore this setting unless Style passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." + "documentation": "Ignore this setting unless Style passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." }, "BackgroundColor": { "shape": "BurninSubtitleBackgroundColor", "locationName": "backgroundColor", - "documentation": "Specify the color of the rectangle behind the captions. Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present." + "documentation": "Specify the color of the rectangle behind the captions. 
Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present." }, "BackgroundOpacity": { "shape": "__integerMin0Max255", "locationName": "backgroundOpacity", - "documentation": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions." + "documentation": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions." }, "FallbackFont": { "shape": "BurninSubtitleFallbackFont", "locationName": "fallbackFont", - "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. 
When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." }, "FontColor": { "shape": "BurninSubtitleFontColor", "locationName": "fontColor", - "documentation": "Specify the color of the burned-in captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present." + "documentation": "Specify the color of the burned-in captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present." }, "FontOpacity": { "shape": "__integerMin0Max255", @@ -2431,17 +2431,17 @@ "FontResolution": { "shape": "__integerMin96Max600", "locationName": "fontResolution", - "documentation": "Specify the Font resolution (FontResolution) in DPI (dots per inch)." + "documentation": "Specify the Font resolution in DPI (dots per inch)." }, "FontScript": { "shape": "FontScript", "locationName": "fontScript", - "documentation": "Set Font script (FontScript) to Automatically determined (AUTOMATIC), or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese." + "documentation": "Set Font script to Automatically determined, or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese." 
}, "FontSize": { "shape": "__integerMin0Max96", "locationName": "fontSize", - "documentation": "Specify the Font size (FontSize) in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size." + "documentation": "Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size." }, "HexFontColor": { "shape": "__stringMin6Max8Pattern09aFAF609aFAF2", @@ -2451,22 +2451,22 @@ "OutlineColor": { "shape": "BurninSubtitleOutlineColor", "locationName": "outlineColor", - "documentation": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present." + "documentation": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present." }, "OutlineSize": { "shape": "__integerMin0Max10", "locationName": "outlineSize", - "documentation": "Specify the Outline size (OutlineSize) of the caption text, in pixels. Leave Outline size blank and set Style passthrough (StylePassthrough) to enabled to use the outline size data from your input captions, if present." + "documentation": "Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present." }, "ShadowColor": { "shape": "BurninSubtitleShadowColor", "locationName": "shadowColor", - "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present." + "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present." 
}, "ShadowOpacity": { "shape": "__integerMin0Max255", "locationName": "shadowOpacity", - "documentation": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions." + "documentation": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions." }, "ShadowXOffset": { "shape": "__integerMinNegative2147483648Max2147483647", @@ -2476,30 +2476,30 @@ "ShadowYOffset": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "shadowYOffset", - "documentation": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset (ShadowYOffset) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow y-offset data from your input captions, if present." + "documentation": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset blank and set Style passthrough to enabled to use the shadow y-offset data from your input captions, if present." 
}, "StylePassthrough": { "shape": "BurnInSubtitleStylePassthrough", "locationName": "stylePassthrough", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." }, "TeletextSpacing": { "shape": "BurninSubtitleTeletextSpacing", "locationName": "teletextSpacing", - "documentation": "Specify whether the text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions." + "documentation": "Specify whether the text spacing in your captions is set by the captions grid, or varies depending on letter width. 
Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions." }, "XPosition": { "shape": "__integerMin0Max2147483647", "locationName": "xPosition", - "documentation": "Specify the horizontal position (XPosition) of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter." + "documentation": "Specify the horizontal position of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter." }, "YPosition": { "shape": "__integerMin0Max2147483647", "locationName": "yPosition", - "documentation": "Specify the vertical position (YPosition) of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output." + "documentation": "Specify the vertical position of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output." } }, - "documentation": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to BURN_IN." + "documentation": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html." }, "BurninSubtitleAlignment": { "type": "string", @@ -2512,7 +2512,7 @@ }, "BurninSubtitleApplyFontColor": { "type": "string", - "documentation": "Ignore this setting unless Style passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", + "documentation": "Ignore this setting unless Style passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. 
When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", "enum": [ "WHITE_TEXT_ONLY", "ALL_TEXT" @@ -2520,7 +2520,7 @@ }, "BurninSubtitleBackgroundColor": { "type": "string", - "documentation": "Specify the color of the rectangle behind the captions. Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present.", + "documentation": "Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present.", "enum": [ "NONE", "BLACK", @@ -2530,7 +2530,7 @@ }, "BurninSubtitleFallbackFont": { "type": "string", - "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. 
When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", "enum": [ "BEST_MATCH", "MONOSPACED_SANSSERIF", @@ -2541,7 +2541,7 @@ }, "BurninSubtitleFontColor": { "type": "string", - "documentation": "Specify the color of the burned-in captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present.", + "documentation": "Specify the color of the burned-in captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present.", "enum": [ "WHITE", "BLACK", @@ -2555,7 +2555,7 @@ }, "BurninSubtitleOutlineColor": { "type": "string", - "documentation": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present.", + "documentation": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present.", "enum": [ "BLACK", "WHITE", @@ -2568,7 +2568,7 @@ }, "BurninSubtitleShadowColor": { "type": "string", - "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present.", + "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present.", "enum": [ "NONE", "BLACK", @@ -2578,7 +2578,7 @@ }, "BurninSubtitleTeletextSpacing": { "type": "string", - "documentation": "Specify whether the text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. 
Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions.", + "documentation": "Specify whether the text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions.", "enum": [ "FIXED_GRID", "PROPORTIONAL", @@ -2620,7 +2620,7 @@ "DestinationSettings": { "shape": "CaptionDestinationSettings", "locationName": "destinationSettings", - "documentation": "Settings related to one captions tab on the MediaConvert console. In your job JSON, an instance of captions DestinationSettings is equivalent to one captions tab in the console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." + "documentation": "Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." }, "LanguageCode": { "shape": "LanguageCode", @@ -2646,7 +2646,7 @@ "DestinationSettings": { "shape": "CaptionDestinationSettings", "locationName": "destinationSettings", - "documentation": "Settings related to one captions tab on the MediaConvert console. In your job JSON, an instance of captions DestinationSettings is equivalent to one captions tab in the console. Usually, one captions tab corresponds to one output captions track. 
Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." + "documentation": "Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." }, "LanguageCode": { "shape": "LanguageCode", @@ -2667,59 +2667,59 @@ "BurninDestinationSettings": { "shape": "BurninDestinationSettings", "locationName": "burninDestinationSettings", - "documentation": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to BURN_IN." + "documentation": "Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html." }, "DestinationType": { "shape": "CaptionDestinationType", "locationName": "destinationType", - "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20)." + "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20." }, "DvbSubDestinationSettings": { "shape": "DvbSubDestinationSettings", "locationName": "dvbSubDestinationSettings", - "documentation": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to DVB_SUB." + "documentation": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html." }, "EmbeddedDestinationSettings": { "shape": "EmbeddedDestinationSettings", "locationName": "embeddedDestinationSettings", - "documentation": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED." + "documentation": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html." }, "ImscDestinationSettings": { "shape": "ImscDestinationSettings", "locationName": "imscDestinationSettings", - "documentation": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to IMSC." + "documentation": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." }, "SccDestinationSettings": { "shape": "SccDestinationSettings", "locationName": "sccDestinationSettings", - "documentation": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. 
When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SCC." + "documentation": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html." }, "SrtDestinationSettings": { "shape": "SrtDestinationSettings", "locationName": "srtDestinationSettings", - "documentation": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SRT." + "documentation": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video." }, "TeletextDestinationSettings": { "shape": "TeletextDestinationSettings", "locationName": "teletextDestinationSettings", - "documentation": "Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TELETEXT." + "documentation": "Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html." 
}, "TtmlDestinationSettings": { "shape": "TtmlDestinationSettings", "locationName": "ttmlDestinationSettings", - "documentation": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TTML." + "documentation": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." }, "WebvttDestinationSettings": { "shape": "WebvttDestinationSettings", "locationName": "webvttDestinationSettings", - "documentation": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to WebVTT." + "documentation": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." 
} }, - "documentation": "Settings related to one captions tab on the MediaConvert console. In your job JSON, an instance of captions DestinationSettings is equivalent to one captions tab in the console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." + "documentation": "Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html." }, "CaptionDestinationType": { "type": "string", - "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded (SCTE20_PLUS_EMBEDDED). To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20 (EMBEDDED_PLUS_SCTE20).", + "documentation": "Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded. 
To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20.", "enum": [ "BURN_IN", "DVB_SUB", @@ -2770,15 +2770,15 @@ "FramerateDenominator": { "shape": "__integerMin1Max1001", "locationName": "framerateDenominator", - "documentation": "Specify the denominator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). Use this setting along with the setting Framerate numerator (framerateNumerator)." + "documentation": "Specify the denominator of the fraction that represents the frame rate for the setting Caption source frame rate. Use this setting along with the setting Framerate numerator." }, "FramerateNumerator": { "shape": "__integerMin1Max60000", "locationName": "framerateNumerator", - "documentation": "Specify the numerator of the fraction that represents the frame rate for the setting Caption source frame rate (CaptionSourceFramerate). Use this setting along with the setting Framerate denominator (framerateDenominator)." + "documentation": "Specify the numerator of the fraction that represents the frame rate for the setting Caption source frame rate. Use this setting along with the setting Framerate denominator." } }, - "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. When you work directly in your JSON job specification, use the settings framerateNumerator and framerateDenominator. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." + "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. 
Specify this value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." }, "CaptionSourceSettings": { "type": "structure", @@ -2806,7 +2806,7 @@ "SourceType": { "shape": "CaptionSourceType", "locationName": "sourceType", - "documentation": "Use Source (SourceType) to identify the format of your input captions. The service cannot auto-detect caption format." + "documentation": "Use Source to identify the format of your input captions. The service cannot auto-detect caption format." }, "TeletextSourceSettings": { "shape": "TeletextSourceSettings", @@ -2828,7 +2828,7 @@ }, "CaptionSourceType": { "type": "string", - "documentation": "Use Source (SourceType) to identify the format of your input captions. The service cannot auto-detect caption format.", + "documentation": "Use Source to identify the format of your input captions. The service cannot auto-detect caption format.", "enum": [ "ANCILLARY", "DVB_SUB", @@ -2855,7 +2855,7 @@ "documentation": "In your JSON job specification, include one child of OutputChannels for each audio channel that you want in your output. Each child should contain one instance of InputChannels or InputChannelsFineTune." } }, - "documentation": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. 
A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both." + "documentation": "Channel mapping contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both." }, "ClipLimits": { "type": "structure", @@ -2901,7 +2901,7 @@ }, "CmafClientCache": { "type": "string", - "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header.", "enum": [ "DISABLED", "ENABLED" @@ -2926,7 +2926,7 @@ "EncryptionMethod": { "shape": "CmafEncryptionType", "locationName": "encryptionMethod", - "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR)." 
+ "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample or AES_CTR." }, "InitializationVectorInManifest": { "shape": "CmafInitializationVectorInManifest", @@ -2953,7 +2953,7 @@ }, "CmafEncryptionType": { "type": "string", - "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR).", + "documentation": "Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample or AES_CTR.", "enum": [ "SAMPLE_AES", "AES_CTR" @@ -2975,7 +2975,7 @@ "ClientCache": { "shape": "CmafClientCache", "locationName": "clientCache", - "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header." }, "CodecSpecification": { "shape": "CmafCodecSpecification", @@ -2990,7 +2990,7 @@ "Destination": { "shape": "__stringPatternS3", "locationName": "destination", - "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + "documentation": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. 
If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." }, "DestinationSettings": { "shape": "DestinationSettings", @@ -3005,12 +3005,12 @@ "FragmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "fragmentLength", - "documentation": "Specify the length, in whole seconds, of the mp4 fragments. When you don't specify a value, MediaConvert defaults to 2. Related setting: Use Fragment length control (FragmentLengthControl) to specify whether the encoder enforces this value strictly." + "documentation": "Specify the length, in whole seconds, of the mp4 fragments. When you don't specify a value, MediaConvert defaults to 2. Related setting: Use Fragment length control to specify whether the encoder enforces this value strictly." }, "ImageBasedTrickPlay": { "shape": "CmafImageBasedTrickPlay", "locationName": "imageBasedTrickPlay", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest), MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. 
The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest, MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" }, "ImageBasedTrickPlaySettings": { "shape": "CmafImageBasedTrickPlaySettings", @@ -3045,12 +3045,12 @@ "MpdProfile": { "shape": "CmafMpdProfile", "locationName": "mpdProfile", - "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE)." + "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. 
When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file." }, "PtsOffsetHandlingForBFrames": { "shape": "CmafPtsOffsetHandlingForBFrames", "locationName": "ptsOffsetHandlingForBFrames", - "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." + "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." }, "SegmentControl": { "shape": "CmafSegmentControl", @@ -3060,12 +3060,12 @@ "SegmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", - "documentation": "Specify the length, in whole seconds, of each segment. 
When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (CmafSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." + "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." }, "SegmentLengthControl": { "shape": "CmafSegmentLengthControl", "locationName": "segmentLengthControl", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." }, "StreamInfResolution": { "shape": "CmafStreamInfResolution", @@ -3095,14 +3095,14 @@ "WriteSegmentTimelineInRepresentation": { "shape": "CmafWriteSegmentTimelineInRepresentation", "locationName": "writeSegmentTimelineInRepresentation", - "documentation": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. 
The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." + "documentation": "When you enable Precise segment duration in DASH manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element." } }, - "documentation": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to CMAF_GROUP_SETTINGS." + "documentation": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "CmafImageBasedTrickPlay": { "type": "string", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. 
When you enable Write DASH manifest (WriteDashManifest), MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest, MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", "enum": [ "NONE", "THUMBNAIL", @@ -3196,7 +3196,7 @@ }, "CmafMpdProfile": { "type": "string", - "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE).", + "documentation": "Specify whether your DASH profile is on-demand or main. 
When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file.", "enum": [ "MAIN_PROFILE", "ON_DEMAND_PROFILE" @@ -3204,7 +3204,7 @@ }, "CmafPtsOffsetHandlingForBFrames": { "type": "string", - "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", + "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. 
For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", "enum": [ "ZERO_BASED", "MATCH_INITIAL_PTS" @@ -3220,7 +3220,7 @@ }, "CmafSegmentLengthControl": { "type": "string", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "enum": [ "EXACT", "GOP_MULTIPLE" @@ -3268,7 +3268,7 @@ }, "CmafWriteSegmentTimelineInRepresentation": { "type": "string", - "documentation": "When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", + "documentation": "When you enable Precise segment duration in DASH manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. 
The segment duration information appears in the duration attribute of the SegmentTemplate element.", "enum": [ "ENABLED", "DISABLED" @@ -3276,7 +3276,7 @@ }, "CmfcAudioDuration": { "type": "string", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. 
For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "enum": [ "DEFAULT_CODEC_DURATION", "MATCH_VIDEO_DURATION" @@ -3284,7 +3284,7 @@ }, "CmfcAudioTrackType": { "type": "string", - "documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting.", + "documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. 
For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting.", "enum": [ "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT", "ALTERNATE_AUDIO_AUTO_SELECT", @@ -3293,7 +3293,7 @@ }, "CmfcDescriptiveVideoServiceFlag": { "type": "string", - "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", + "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. 
For more information, see the Apple documentation.", "enum": [ "DONT_FLAG", "FLAG" @@ -3301,7 +3301,7 @@ }, "CmfcIFrameOnlyManifest": { "type": "string", - "documentation": "Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE).", + "documentation": "Choose Include to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude.", "enum": [ "INCLUDE", "EXCLUDE" @@ -3317,7 +3317,7 @@ }, "CmfcManifestMetadataSignaling": { "type": "string", - "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. 
To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough.", + "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be the same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough.", "enum": [ "ENABLED", "DISABLED" @@ -3325,7 +3325,7 @@ }, "CmfcScte35Esam": { "type": "string", - "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", + "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML.", "enum": [ "INSERT", "NONE" @@ -3333,7 +3333,7 @@ }, "CmfcScte35Source": { "type": "string", - "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file.
Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output.", "enum": [ "PASSTHROUGH", "NONE" @@ -3345,32 +3345,32 @@ "AudioDuration": { "shape": "CmfcAudioDuration", "locationName": "audioDuration", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. 
MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, "AudioGroupId": { "shape": "__string", "locationName": "audioGroupId", - "documentation": "Specify the audio rendition group for this audio rendition. Specify up to one value for each audio output in your output group. This value appears in your HLS parent manifest in the EXT-X-MEDIA tag of TYPE=AUDIO, as the value for the GROUP-ID attribute. For example, if you specify \"audio_aac_1\" for Audio group ID, it appears in your manifest like this: #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID=\"audio_aac_1\". Related setting: To associate the rendition group that this audio track belongs to with a video rendition, include the same value that you provide here for that video output's setting Audio rendition sets (audioRenditionSets)." + "documentation": "Specify the audio rendition group for this audio rendition. Specify up to one value for each audio output in your output group. This value appears in your HLS parent manifest in the EXT-X-MEDIA tag of TYPE=AUDIO, as the value for the GROUP-ID attribute. For example, if you specify \"audio_aac_1\" for Audio group ID, it appears in your manifest like this: #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID=\"audio_aac_1\". Related setting: To associate the rendition group that this audio track belongs to with a video rendition, include the same value that you provide here for that video output's setting Audio rendition sets." }, "AudioRenditionSets": { "shape": "__string", "locationName": "audioRenditionSets", - "documentation": "List the audio rendition groups that you want included with this video rendition. Use a comma-separated list. 
For example, say you want to include the audio rendition groups that have the audio group IDs \"audio_aac_1\" and \"audio_dolby\". Then you would specify this value: \"audio_aac_1,audio_dolby\". Related setting: The rendition groups that you include in your comma-separated list should all match values that you specify in the setting Audio group ID (AudioGroupId) for audio renditions in the same output group as this video rendition. Default behavior: If you don't specify anything here and for Audio group ID, MediaConvert puts each audio variant in its own audio rendition group and associates it with every video variant. Each value in your list appears in your HLS parent manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute. To continue the previous example, say that the file name for the child manifest for your video rendition is \"amazing_video_1.m3u8\". Then, in your parent manifest, each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO=\"audio_aac_1\"... amazing_video_1.m3u8 #EXT-X-STREAM-INF:AUDIO=\"audio_dolby\"... amazing_video_1.m3u8" + "documentation": "List the audio rendition groups that you want included with this video rendition. Use a comma-separated list. For example, say you want to include the audio rendition groups that have the audio group IDs \"audio_aac_1\" and \"audio_dolby\". Then you would specify this value: \"audio_aac_1,audio_dolby\". Related setting: The rendition groups that you include in your comma-separated list should all match values that you specify in the setting Audio group ID for audio renditions in the same output group as this video rendition. Default behavior: If you don't specify anything here and for Audio group ID, MediaConvert puts each audio variant in its own audio rendition group and associates it with every video variant. Each value in your list appears in your HLS parent manifest in the EXT-X-STREAM-INF tag as the value for the AUDIO attribute. 
To continue the previous example, say that the file name for the child manifest for your video rendition is \"amazing_video_1.m3u8\". Then, in your parent manifest, each value will appear on separate lines, like this: #EXT-X-STREAM-INF:AUDIO=\"audio_aac_1\"... amazing_video_1.m3u8 #EXT-X-STREAM-INF:AUDIO=\"audio_dolby\"... amazing_video_1.m3u8" }, "AudioTrackType": { "shape": "CmfcAudioTrackType", "locationName": "audioTrackType", - "documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default (ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT) to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default (ALTERNATE_AUDIO_AUTO_SELECT) to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting." + "documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. 
For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting." }, "DescriptiveVideoServiceFlag": { "shape": "CmfcDescriptiveVideoServiceFlag", "locationName": "descriptiveVideoServiceFlag", - "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." + "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." 
}, "IFrameOnlyManifest": { "shape": "CmfcIFrameOnlyManifest", "locationName": "iFrameOnlyManifest", - "documentation": "Choose Include (INCLUDE) to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE)." + "documentation": "Choose Include to have MediaConvert generate an HLS child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude." }, "KlvMetadata": { "shape": "CmfcKlvMetadata", @@ -3380,44 +3380,44 @@ "ManifestMetadataSignaling": { "shape": "CmfcManifestMetadataSignaling", "locationName": "manifestMetadataSignaling", - "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough." 
+ "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be the same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough." }, "Scte35Esam": { "shape": "CmfcScte35Esam", "locationName": "scte35Esam", - "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." }, "Scte35Source": { "shape": "CmfcScte35Source", "locationName": "scte35Source", - "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output." + "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output." 
}, "TimedMetadata": { "shape": "CmfcTimedMetadata", "locationName": "timedMetadata", - "documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank." + "documentation": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None or leave blank." }, "TimedMetadataBoxVersion": { "shape": "CmfcTimedMetadataBoxVersion", "locationName": "timedMetadataBoxVersion", - "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough." + "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough." }, "TimedMetadataSchemeIdUri": { "shape": "__stringMax1000", "locationName": "timedMetadataSchemeIdUri", - "documentation": "Specify the event message box (eMSG) scheme ID URI (scheme_id_uri) for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. 
Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata (timedMetadata) to Passthrough." + "documentation": "Specify the event message box (eMSG) scheme ID URI for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata to Passthrough." }, "TimedMetadataValue": { "shape": "__stringMax1000", "locationName": "timedMetadataValue", - "documentation": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata (timedMetadata) to Passthrough." + "documentation": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata to Passthrough." } }, "documentation": "These settings relate to the fragmented MP4 container for the segments in your CMAF outputs." }, "CmfcTimedMetadata": { "type": "string", - "documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank.", + "documentation": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. 
To exclude this ID3 metadata: Set ID3 metadata to None or leave blank.", "enum": [ "PASSTHROUGH", "NONE" @@ -3425,7 +3425,7 @@ }, "CmfcTimedMetadataBoxVersion": { "type": "string", - "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough.", + "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough.", "enum": [ "VERSION_0", "VERSION_1" @@ -3457,7 +3457,7 @@ "Hdr10Metadata": { "shape": "Hdr10Metadata", "locationName": "hdr10Metadata", - "documentation": "Use these settings when you convert to the HDR 10 color space. Specify the SMPTE ST 2086 Mastering Display Color Volume static metadata that you want signaled in the output. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the the content creator. When you set Color space conversion (ColorSpaceConversion) to HDR 10 (FORCE_HDR10), these settings are required. You must set values for Max frame average light level (maxFrameAverageLightLevel) and Max content light level (maxContentLightLevel); these settings don't have a default value. The default values for the other HDR 10 metadata settings are defined by the P3D65 color space. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr." + "documentation": "Use these settings when you convert to the HDR 10 color space. 
Specify the SMPTE ST 2086 Mastering Display Color Volume static metadata that you want signaled in the output. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the content creator. When you set Color space conversion to HDR 10, these settings are required. You must set values for Max frame average light level and Max content light level; these settings don't have a default value. The default values for the other HDR 10 metadata settings are defined by the P3D65 color space. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr." }, "HdrToSdrToneMapper": { "shape": "HDRToSDRToneMapper", @@ -3489,7 +3489,7 @@ }, "ColorMetadata": { "type": "string", - "documentation": "Choose Insert (INSERT) for this setting to include color metadata in this output. Choose Ignore (IGNORE) to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default.", + "documentation": "Choose Insert for this setting to include color metadata in this output. Choose Ignore to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default.", "enum": [ "IGNORE", "INSERT" @@ -3525,7 +3525,7 @@ }, "ColorSpaceUsage": { "type": "string", - "documentation": "There are two sources for color metadata, the input file and the job input settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata). The Color space usage setting determines which takes precedence. Choose Force (FORCE) to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the source when it is present. 
If there's no color metadata in your input file, the service defaults to using values you specify in the input settings.", + "documentation": "There are two sources for color metadata, the input file and the job input settings Color space and HDR master display information settings. The Color space usage setting determines which takes precedence. Choose Force to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. FALLBACK - Choose Fallback to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings.", "enum": [ "FORCE", "FALLBACK" @@ -3573,7 +3573,7 @@ "M2tsSettings": { "shape": "M2tsSettings", "locationName": "m2tsSettings", - "documentation": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." + "documentation": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). 
Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." }, "M3u8Settings": { "shape": "M3u8Settings", @@ -3896,7 +3896,7 @@ "PlaybackDeviceCompatibility": { "shape": "DashIsoPlaybackDeviceCompatibility", "locationName": "playbackDeviceCompatibility", - "documentation": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted." + "documentation": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted." }, "SpekeKeyProvider": { "shape": "SpekeKeyProvider", @@ -3908,7 +3908,7 @@ }, "DashIsoGroupAudioChannelConfigSchemeIdUri": { "type": "string", - "documentation": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. 
Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.", + "documentation": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration, to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.", "enum": [ "MPEG_CHANNEL_CONFIGURATION", "DOLBY_CHANNEL_CONFIGURATION" @@ -3925,7 +3925,7 @@ "AudioChannelConfigSchemeIdUri": { "shape": "DashIsoGroupAudioChannelConfigSchemeIdUri", "locationName": "audioChannelConfigSchemeIdUri", - "documentation": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration (MPEG_CHANNEL_CONFIGURATION), to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration (DOLBY_CHANNEL_CONFIGURATION) to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011." 
+ "documentation": "Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration, to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011." }, "BaseUrl": { "shape": "__string", @@ -3940,7 +3940,7 @@ "Destination": { "shape": "__stringPatternS3", "locationName": "destination", - "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + "documentation": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." }, "DestinationSettings": { "shape": "DestinationSettings", @@ -3965,7 +3965,7 @@ "ImageBasedTrickPlay": { "shape": "DashIsoImageBasedTrickPlay", "locationName": "imageBasedTrickPlay", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. 
Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" }, "ImageBasedTrickPlaySettings": { "shape": "DashIsoImageBasedTrickPlaySettings", @@ -3990,12 +3990,12 @@ "MpdProfile": { "shape": "DashIsoMpdProfile", "locationName": "mpdProfile", - "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE)." + "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. 
When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file." }, "PtsOffsetHandlingForBFrames": { "shape": "DashIsoPtsOffsetHandlingForBFrames", "locationName": "ptsOffsetHandlingForBFrames", - "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." + "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here." }, "SegmentControl": { "shape": "DashIsoSegmentControl", @@ -4005,12 +4005,12 @@ "SegmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", - "documentation": "Specify the length, in whole seconds, of each segment. 
When you don't specify a value, MediaConvert defaults to 30. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (DashIsoSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." + "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 30. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." }, "SegmentLengthControl": { "shape": "DashIsoSegmentLengthControl", "locationName": "segmentLengthControl", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." }, "VideoCompositionOffsets": { "shape": "DashIsoVideoCompositionOffsets", @@ -4023,7 +4023,7 @@ "documentation": "If you get an HTTP error in the 400 range when you play back your DASH output, enable this setting and run your transcoding job again. When you enable this setting, the service writes precise segment durations in the DASH manifest. 
The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When you don't enable this setting, the service writes approximate segment durations in your DASH manifest." } }, - "documentation": "Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to DASH_ISO_GROUP_SETTINGS." + "documentation": "Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "DashIsoHbbtvCompliance": { "type": "string", @@ -4035,7 +4035,7 @@ }, "DashIsoImageBasedTrickPlay": { "type": "string", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. 
A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", "enum": [ "NONE", "THUMBNAIL", @@ -4097,7 +4097,7 @@ }, "DashIsoMpdProfile": { "type": "string", - "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control (SegmentControl) to Single file (SINGLE_FILE).", + "documentation": "Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file.", "enum": [ "MAIN_PROFILE", "ON_DEMAND_PROFILE" @@ -4105,7 +4105,7 @@ }, "DashIsoPlaybackDeviceCompatibility": { "type": "string", - "documentation": "This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1 (CENC_V1). If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted.", + "documentation": "This setting can improve the compatibility of your output with video players on obsolete devices. 
It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted.", "enum": [ "CENC_V1", "UNENCRYPTED_SEI" @@ -4113,7 +4113,7 @@ }, "DashIsoPtsOffsetHandlingForBFrames": { "type": "string", - "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS (MATCH_INITIAL_PTS) when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based (ZERO_BASED) to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", + "documentation": "Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. 
For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.", "enum": [ "ZERO_BASED", "MATCH_INITIAL_PTS" @@ -4129,7 +4129,7 @@ }, "DashIsoSegmentLengthControl": { "type": "string", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "enum": [ "EXACT", "GOP_MULTIPLE" @@ -4145,7 +4145,7 @@ }, "DashIsoWriteSegmentTimelineInRepresentation": { "type": "string", - "documentation": "When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", + "documentation": "When you enable Precise segment duration in manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. 
The segment duration information appears in the duration attribute of the SegmentTemplate element.", "enum": [ "ENABLED", "DISABLED" @@ -4196,7 +4196,7 @@ "Mode": { "shape": "DeinterlacerMode", "locationName": "mode", - "documentation": "Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. Default is Deinterlace. - Deinterlace converts interlaced to progressive. - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. - Adaptive auto-detects and converts to progressive." + "documentation": "Use Deinterlacer to choose how the service will do deinterlacing. Default is Deinterlace.\n- Deinterlace converts interlaced to progressive.\n- Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.\n- Adaptive auto-detects and converts to progressive." } }, "documentation": "Settings for deinterlacer" @@ -4211,7 +4211,7 @@ }, "DeinterlacerMode": { "type": "string", - "documentation": "Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. Default is Deinterlace. - Deinterlace converts interlaced to progressive. - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. - Adaptive auto-detects and converts to progressive.", + "documentation": "Use Deinterlacer to choose how the service will do deinterlacing. Default is Deinterlace.\n- Deinterlace converts interlaced to progressive.\n- Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p.\n- Adaptive auto-detects and converts to progressive.", "enum": [ "DEINTERLACE", "INVERSE_TELECINE", @@ -4428,7 +4428,7 @@ }, "DropFrameTimecode": { "type": "string", - "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. 
This setting is enabled by default when Timecode insertion (TimecodeInsertion) is enabled.", + "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled.", "enum": [ "DISABLED", "ENABLED" @@ -4453,7 +4453,7 @@ "documentation": "The number of milliseconds between instances of this table in the output transport stream." } }, - "documentation": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output." }, "DvbSdtSettings": { "type": "structure", @@ -4479,7 +4479,7 @@ "documentation": "The service provider name placed in the service_descriptor in the Service Description Table. Maximum length is 256 characters." } }, - "documentation": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output." 
}, "DvbSubDestinationSettings": { "type": "structure", @@ -4492,17 +4492,17 @@ "ApplyFontColor": { "shape": "DvbSubtitleApplyFontColor", "locationName": "applyFontColor", - "documentation": "Ignore this setting unless Style Passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." + "documentation": "Ignore this setting unless Style Passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text." }, "BackgroundColor": { "shape": "DvbSubtitleBackgroundColor", "locationName": "backgroundColor", - "documentation": "Specify the color of the rectangle behind the captions. Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present." + "documentation": "Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present." 
}, "BackgroundOpacity": { "shape": "__integerMin0Max255", "locationName": "backgroundOpacity", - "documentation": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." }, "DdsHandling": { "shape": "DvbddsHandling", @@ -4512,22 +4512,22 @@ "DdsXCoordinate": { "shape": "__integerMin0Max2147483647", "locationName": "ddsXCoordinate", - "documentation": "Use this setting, along with DDS y-coordinate (ddsYCoordinate), to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the left side of the frame and the left side of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match." 
+ "documentation": "Use this setting, along with DDS y-coordinate, to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the left side of the frame and the left side of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling to a value other than None. MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment. All burn-in and DVB-Sub font settings must match." }, "DdsYCoordinate": { "shape": "__integerMin0Max2147483647", "locationName": "ddsYCoordinate", - "documentation": "Use this setting, along with DDS x-coordinate (ddsXCoordinate), to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the top of the frame and the top of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match." + "documentation": "Use this setting, along with DDS x-coordinate, to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the top of the frame and the top of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling to a value other than None. 
MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match." }, "FallbackFont": { "shape": "DvbSubSubtitleFallbackFont", "locationName": "fallbackFont", - "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." }, "FontColor": { "shape": "DvbSubtitleFontColor", "locationName": "fontColor", - "documentation": "Specify the color of the captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the color of the captions text. 
Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." }, "FontOpacity": { "shape": "__integerMin0Max255", @@ -4537,22 +4537,22 @@ "FontResolution": { "shape": "__integerMin96Max600", "locationName": "fontResolution", - "documentation": "Specify the Font resolution (FontResolution) in DPI (dots per inch).\nWithin your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the Font resolution in DPI (dots per inch).\nWithin your job settings, all of your DVB-Sub settings must be identical." }, "FontScript": { "shape": "FontScript", "locationName": "fontScript", - "documentation": "Set Font script (FontScript) to Automatically determined (AUTOMATIC), or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Set Font script to Automatically determined, or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese. Within your job settings, all of your DVB-Sub settings must be identical." }, "FontSize": { "shape": "__integerMin0Max96", "locationName": "fontSize", - "documentation": "Specify the Font size (FontSize) in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size. Within your job settings, all of your DVB-Sub settings must be identical." 
}, "Height": { "shape": "__integerMin1Max2147483647", "locationName": "height", - "documentation": "Specify the height, in pixels, of this set of DVB-Sub captions. The default value is 576 pixels. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and DVB-Sub font settings must match." + "documentation": "Specify the height, in pixels, of this set of DVB-Sub captions. The default value is 576 pixels. Related setting: When you use this setting, you must set DDS handling to a value other than None. All burn-in and DVB-Sub font settings must match." }, "HexFontColor": { "shape": "__stringMin6Max8Pattern09aFAF609aFAF2", @@ -4562,22 +4562,22 @@ "OutlineColor": { "shape": "DvbSubtitleOutlineColor", "locationName": "outlineColor", - "documentation": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." }, "OutlineSize": { "shape": "__integerMin0Max10", "locationName": "outlineSize", - "documentation": "Specify the Outline size (OutlineSize) of the caption text, in pixels. Leave Outline size blank and set Style passthrough (StylePassthrough) to enabled to use the outline size data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present. 
Within your job settings, all of your DVB-Sub settings must be identical." }, "ShadowColor": { "shape": "DvbSubtitleShadowColor", "locationName": "shadowColor", - "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." }, "ShadowOpacity": { "shape": "__integerMin0Max255", "locationName": "shadowOpacity", - "documentation": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough (StylePassthrough) is set to Enabled, leave Shadow opacity (ShadowOpacity) blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions. Within your job settings, all of your DVB-Sub settings must be identical." 
}, "ShadowXOffset": { "shape": "__integerMinNegative2147483648Max2147483647", @@ -4587,12 +4587,12 @@ "ShadowYOffset": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "shadowYOffset", - "documentation": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset (ShadowYOffset) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow y-offset data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset blank and set Style passthrough to enabled to use the shadow y-offset data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical." }, "StylePassthrough": { "shape": "DvbSubtitleStylePassthrough", "locationName": "stylePassthrough", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. 
Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings." }, "SubtitlingType": { "shape": "DvbSubtitlingType", @@ -4602,25 +4602,25 @@ "TeletextSpacing": { "shape": "DvbSubtitleTeletextSpacing", "locationName": "teletextSpacing", - "documentation": "Specify whether the Text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify whether the Text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical." }, "Width": { "shape": "__integerMin1Max2147483647", "locationName": "width", - "documentation": "Specify the width, in pixels, of this set of DVB-Sub captions. The default value is 720 pixels. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and DVB-Sub font settings must match." + "documentation": "Specify the width, in pixels, of this set of DVB-Sub captions. The default value is 720 pixels. Related setting: When you use this setting, you must set DDS handling to a value other than None. All burn-in and DVB-Sub font settings must match." 
}, "XPosition": { "shape": "__integerMin0Max2147483647", "locationName": "xPosition", - "documentation": "Specify the horizontal position (XPosition) of the captions, relative to the left side of the outputin pixels. A value of 10 would result in the captions starting 10 pixels from the left ofthe output. If no explicit x_position is provided, the horizontal caption position will bedetermined by the alignment parameter. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the horizontal position of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter. Within your job settings, all of your DVB-Sub settings must be identical." }, "YPosition": { "shape": "__integerMin0Max2147483647", "locationName": "yPosition", - "documentation": "Specify the vertical position (YPosition) of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. Within your job settings, all of your DVB-Sub settings must be identical." + "documentation": "Specify the vertical position of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. Within your job settings, all of your DVB-Sub settings must be identical." } }, - "documentation": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html. 
When you work directly in your JSON job specification, include this object and any required children when you set destinationType to DVB_SUB." + "documentation": "Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html." }, "DvbSubSourceSettings": { "type": "structure", @@ -4635,7 +4635,7 @@ }, "DvbSubSubtitleFallbackFont": { "type": "string", - "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font (FallbackFont) to best match (BEST_MATCH), or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. 
When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", "enum": [ "BEST_MATCH", "MONOSPACED_SANSSERIF", @@ -4655,7 +4655,7 @@ }, "DvbSubtitleApplyFontColor": { "type": "string", - "documentation": "Ignore this setting unless Style Passthrough (StylePassthrough) is set to Enabled and Font color (FontColor) set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color (ApplyFontColor) for additional font color controls. When you choose White text only (WHITE_TEXT_ONLY), or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", + "documentation": "Ignore this setting unless Style Passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.", "enum": [ "WHITE_TEXT_ONLY", "ALL_TEXT" @@ -4663,7 +4663,7 @@ }, "DvbSubtitleBackgroundColor": { "type": "string", - "documentation": "Specify the color of the rectangle behind the captions. Leave background color (BackgroundColor) blank and set Style passthrough (StylePassthrough) to enabled to use the background color data from your input captions, if present.", + "documentation": "Specify the color of the rectangle behind the captions. 
Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present.", "enum": [ "NONE", "BLACK", @@ -4673,7 +4673,7 @@ }, "DvbSubtitleFontColor": { "type": "string", - "documentation": "Specify the color of the captions text. Leave Font color (FontColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "documentation": "Specify the color of the captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", "enum": [ "WHITE", "BLACK", @@ -4687,7 +4687,7 @@ }, "DvbSubtitleOutlineColor": { "type": "string", - "documentation": "Specify font outline color. Leave Outline color (OutlineColor) blank and set Style passthrough (StylePassthrough) to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "documentation": "Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", "enum": [ "BLACK", "WHITE", @@ -4700,7 +4700,7 @@ }, "DvbSubtitleShadowColor": { "type": "string", - "documentation": "Specify the color of the shadow cast by the captions. Leave Shadow color (ShadowColor) blank and set Style passthrough (StylePassthrough) to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", + "documentation": "Specify the color of the shadow cast by the captions. 
Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.", "enum": [ "NONE", "BLACK", @@ -4710,7 +4710,7 @@ }, "DvbSubtitleStylePassthrough": { "type": "string", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.", "enum": [ "ENABLED", "DISABLED" @@ -4718,7 +4718,7 @@ }, "DvbSubtitleTeletextSpacing": { "type": "string", - "documentation": "Specify whether the Text spacing (TeletextSpacing) in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid (FIXED_GRID) to conform to the spacing specified in the captions file more accurately. 
Choose proportional (PROPORTIONAL) to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical.", + "documentation": "Specify whether the Text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical.", "enum": [ "FIXED_GRID", "PROPORTIONAL", @@ -4742,7 +4742,7 @@ "documentation": "The number of milliseconds between instances of this table in the output transport stream." } }, - "documentation": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output." }, "DvbddsHandling": { "type": "string", @@ -4780,7 +4780,7 @@ }, "Eac3AtmosDownmixControl": { "type": "string", - "documentation": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom (SPECIFIED) to provide downmix values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround (LoRoSurroundMixLevel), Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), and Stereo downmix (StereoDownmix). 
When you keep Custom (SPECIFIED) for Downmix control (DownmixControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings.", + "documentation": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom to provide downmix values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround, Left total/Right total surround, Left total/Right total center, Left only/Right only center, and Stereo downmix. When you keep Custom for Downmix control and you don't specify values for the related settings, MediaConvert uses default values for those settings.", "enum": [ "SPECIFIED", "INITIALIZE_FROM_SOURCE" @@ -4788,7 +4788,7 @@ }, "Eac3AtmosDynamicRangeCompressionLine": { "type": "string", - "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. 
Otherwise, MediaConvert ignores Dynamic range compression line. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "NONE", "FILM_STANDARD", @@ -4800,7 +4800,7 @@ }, "Eac3AtmosDynamicRangeCompressionRf": { "type": "string", - "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression RF. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "NONE", "FILM_STANDARD", @@ -4812,7 +4812,7 @@ }, "Eac3AtmosDynamicRangeControl": { "type": "string", - "documentation": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. 
Keep the default value, Custom (SPECIFIED), to provide dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf). When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings.", + "documentation": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom, to provide dynamic range control values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line and Dynamic range compression RF. When you keep the value Custom for Dynamic range control and you don't specify values for the related settings, MediaConvert uses default values for those settings.", "enum": [ "SPECIFIED", "INITIALIZE_FROM_SOURCE" @@ -4855,42 +4855,42 @@ "DownmixControl": { "shape": "Eac3AtmosDownmixControl", "locationName": "downmixControl", - "documentation": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom (SPECIFIED) to provide downmix values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround (LoRoSurroundMixLevel), Left total/Right total surround (LtRtSurroundMixLevel), Left total/Right total center (LtRtCenterMixLevel), Left only/Right only center (LoRoCenterMixLevel), and Stereo downmix (StereoDownmix). 
When you keep Custom (SPECIFIED) for Downmix control (DownmixControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings." + "documentation": "Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom to provide downmix values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround, Left total/Right total surround, Left total/Right total center, Left only/Right only center, and Stereo downmix. When you keep Custom for Downmix control and you don't specify values for the related settings, MediaConvert uses default values for those settings." }, "DynamicRangeCompressionLine": { "shape": "Eac3AtmosDynamicRangeCompressionLine", "locationName": "dynamicRangeCompressionLine", - "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression line (DynamicRangeCompressionLine). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. 
Otherwise, MediaConvert ignores Dynamic range compression line. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "DynamicRangeCompressionRf": { "shape": "Eac3AtmosDynamicRangeCompressionRf", "locationName": "dynamicRangeCompressionRf", - "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light (ATMOS_STORAGE_DDP_COMPR_FILM_LIGHT) Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom (SPECIFIED) for the setting Dynamic range control (DynamicRangeControl). Otherwise, MediaConvert ignores Dynamic range compression RF (DynamicRangeCompressionRf). For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression RF. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "DynamicRangeControl": { "shape": "Eac3AtmosDynamicRangeControl", "locationName": "dynamicRangeControl", - "documentation": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. 
Keep the default value, Custom (SPECIFIED), to provide dynamic range control values in your job settings. Choose Follow source (INITIALIZE_FROM_SOURCE) to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line (DynamicRangeCompressionLine) and Dynamic range compression RF (DynamicRangeCompressionRf). When you keep the value Custom (SPECIFIED) for Dynamic range control (DynamicRangeControl) and you don't specify values for the related settings, MediaConvert uses default values for those settings." + "documentation": "Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom, to provide dynamic range control values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line and Dynamic range compression RF. When you keep the value Custom for Dynamic range control and you don't specify values for the related settings, MediaConvert uses default values for those settings." }, "LoRoCenterMixLevel": { "shape": "__doubleMinNegative6Max3", "locationName": "loRoCenterMixLevel", - "documentation": "Specify a value for the following Dolby Atmos setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Left only/Right only center (LoRoCenterMixLevel)." 
+ "documentation": "Specify a value for the following Dolby Atmos setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Left only/Right only center." }, "LoRoSurroundMixLevel": { "shape": "__doubleMinNegative60MaxNegative1", "locationName": "loRoSurroundMixLevel", - "documentation": "Specify a value for the following Dolby Atmos setting: Left only/Right only (Lo/Ro surround). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Left only/Right only surround (LoRoSurroundMixLevel)." + "documentation": "Specify a value for the following Dolby Atmos setting: Left only/Right only. MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Left only/Right only surround." 
}, "LtRtCenterMixLevel": { "shape": "__doubleMinNegative6Max3", "locationName": "ltRtCenterMixLevel", - "documentation": "Specify a value for the following Dolby Atmos setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Left total/Right total center (LtRtCenterMixLevel)." + "documentation": "Specify a value for the following Dolby Atmos setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default value: -3 dB Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Left total/Right total center." }, "LtRtSurroundMixLevel": { "shape": "__doubleMinNegative60MaxNegative1", "locationName": "ltRtSurroundMixLevel", - "documentation": "Specify a value for the following Dolby Atmos setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. Default value: -3 dB (ATMOS_STORAGE_DDP_MIXLEV_MINUS_3_DB) Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix (Eac3AtmosStereoDownmix). Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). 
Otherwise, the service ignores Left total/Right total surround (LtRtSurroundMixLevel)." + "documentation": "Specify a value for the following Dolby Atmos setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. Default value: -3 dB Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, the service ignores Left total/Right total surround." }, "MeteringMode": { "shape": "Eac3AtmosMeteringMode", @@ -4910,7 +4910,7 @@ "StereoDownmix": { "shape": "Eac3AtmosStereoDownmix", "locationName": "stereoDownmix", - "documentation": "Choose how the service does stereo downmixing. Default value: Not indicated (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo downmix (StereoDownmix)." + "documentation": "Choose how the service does stereo downmixing. Default value: Not indicated Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Stereo downmix." }, "SurroundExMode": { "shape": "Eac3AtmosSurroundExMode", @@ -4918,11 +4918,11 @@ "documentation": "Specify whether your input audio has an additional center rear surround channel matrix encoded into your left and right surround channels." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3_ATMOS." + "documentation": "Required when you set Codec to the value EAC3_ATMOS." }, "Eac3AtmosStereoDownmix": { "type": "string", - "documentation": "Choose how the service does stereo downmixing. 
Default value: Not indicated (ATMOS_STORAGE_DDP_DMIXMOD_NOT_INDICATED) Related setting: To have MediaConvert use this value, keep the default value, Custom (SPECIFIED) for the setting Downmix control (DownmixControl). Otherwise, MediaConvert ignores Stereo downmix (StereoDownmix).", + "documentation": "Choose how the service does stereo downmixing. Default value: Not indicated Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Stereo downmix.", "enum": [ "NOT_INDICATED", "STEREO", @@ -4977,7 +4977,7 @@ }, "Eac3DynamicRangeCompressionLine": { "type": "string", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. 
For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "NONE", "FILM_STANDARD", @@ -4989,7 +4989,7 @@ }, "Eac3DynamicRangeCompressionRf": { "type": "string", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.", "enum": [ "NONE", "FILM_STANDARD", @@ -5075,12 +5075,12 @@ "DynamicRangeCompressionLine": { "shape": "Eac3DynamicRangeCompressionLine", "locationName": "dynamicRangeCompressionLine", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. 
Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "DynamicRangeCompressionRf": { "shape": "Eac3DynamicRangeCompressionRf", "locationName": "dynamicRangeCompressionRf", - "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile (DynamicRangeCompressionProfile). For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." + "documentation": "Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. 
For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf." }, "LfeControl": { "shape": "Eac3LfeControl", @@ -5095,22 +5095,22 @@ "LoRoCenterMixLevel": { "shape": "__doubleMinNegative60Max3", "locationName": "loRoCenterMixLevel", - "documentation": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left only/Right only center (loRoCenterMixLevel)." + "documentation": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only center mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left only/Right only center." }, "LoRoSurroundMixLevel": { "shape": "__doubleMinNegative60MaxNegative1", "locationName": "loRoSurroundMixLevel", - "documentation": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only (Lo/Ro surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). 
Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left only/Right only surround (loRoSurroundMixLevel)." + "documentation": "Specify a value for the following Dolby Digital Plus setting: Left only/Right only. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left only/Right only surround." }, "LtRtCenterMixLevel": { "shape": "__doubleMinNegative60Max3", "locationName": "ltRtCenterMixLevel", - "documentation": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left total/Right total center (ltRtCenterMixLevel)." + "documentation": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total center mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. 
This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left total/Right total center." }, "LtRtSurroundMixLevel": { "shape": "__doubleMinNegative60MaxNegative1", "locationName": "ltRtSurroundMixLevel", - "documentation": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix (Eac3StereoDownmix). Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Left total/Right total surround (ltRtSurroundMixLevel)." + "documentation": "Specify a value for the following Dolby Digital Plus setting: Left total/Right total surround mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left total/Right total surround." }, "MetadataControl": { "shape": "Eac3MetadataControl", @@ -5135,7 +5135,7 @@ "StereoDownmix": { "shape": "Eac3StereoDownmix", "locationName": "stereoDownmix", - "documentation": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix)." 
+ "documentation": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Stereo downmix." }, "SurroundExMode": { "shape": "Eac3SurroundExMode", @@ -5148,11 +5148,11 @@ "documentation": "When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into the two channels." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value EAC3." + "documentation": "Required when you set Codec to the value EAC3." }, "Eac3StereoDownmix": { "type": "string", - "documentation": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) for the setting Coding mode (Eac3CodingMode). If you choose a different value for Coding mode, the service ignores Stereo downmix (Eac3StereoDownmix).", + "documentation": "Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Stereo downmix.", "enum": [ "NOT_INDICATED", "LO_RO", @@ -5180,7 +5180,7 @@ }, "EmbeddedConvert608To708": { "type": "string", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. 
If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "enum": [ "UPCONVERT", "DISABLED" @@ -5197,10 +5197,10 @@ "Destination708ServiceNumber": { "shape": "__integerMin1Max6", "locationName": "destination708ServiceNumber", - "documentation": "Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert (Convert608To708) to Upconvert (UPCONVERT) in your input captions selector settings. If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number that you specify for CC channel number (destination608ChannelNumber) for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded." + "documentation": "Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert to Upconvert in your input captions selector settings. If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number that you specify for CC channel number for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded." } }, - "documentation": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to EMBEDDED, EMBEDDED_PLUS_SCTE20, or SCTE20_PLUS_EMBEDDED." + "documentation": "Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html." }, "EmbeddedSourceSettings": { "type": "structure", @@ -5208,7 +5208,7 @@ "Convert608To708": { "shape": "EmbeddedConvert608To708", "locationName": "convert608To708", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." }, "Source608ChannelNumber": { "shape": "__integerMin1Max4", @@ -5238,7 +5238,7 @@ }, "EmbeddedTimecodeOverride": { "type": "string", - "documentation": "Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode source (inputTimecodeSource) to Embedded (EMBEDDED). 
Leave Embedded timecode override blank, or set to None (NONE), when your input does not contain MDPM timecode.", + "documentation": "Set Embedded timecode override to Use MDPM when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata. When you do, we recommend you also set Timecode source to Embedded. Leave Embedded timecode override blank, or set to None, when your input does not contain MDPM timecode.", "enum": [ "NONE", "USE_MDPM" @@ -5272,7 +5272,7 @@ "ManifestConfirmConditionNotification": { "shape": "EsamManifestConfirmConditionNotification", "locationName": "manifestConfirmConditionNotification", - "documentation": "Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the manifest conditioning instructions that you provide in the setting MCC XML (mccXml)." + "documentation": "Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the manifest conditioning instructions that you provide in the setting MCC XML." }, "ResponseSignalPreroll": { "shape": "__integerMin0Max30000", @@ -5282,7 +5282,7 @@ "SignalProcessingNotification": { "shape": "EsamSignalProcessingNotification", "locationName": "signalProcessingNotification", - "documentation": "Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing instructions that you provide in the setting SCC XML (sccXml)." + "documentation": "Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing instructions that you provide in the setting SCC XML." } }, "documentation": "Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion, you can ignore these settings." 
@@ -5293,7 +5293,7 @@ "SccXml": { "shape": "__stringPatternSNSignalProcessingNotificationNS", "locationName": "sccXml", - "documentation": "Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. For your MPEG2-TS file outputs, if you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM (scte35Esam). Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both." + "documentation": "Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. For your MPEG2-TS file outputs, if you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM. Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both." } }, "documentation": "ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025." @@ -5348,7 +5348,7 @@ "Destination": { "shape": "__stringPatternS3", "locationName": "destination", - "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + "documentation": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. 
If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." }, "DestinationSettings": { "shape": "DestinationSettings", @@ -5356,11 +5356,11 @@ "documentation": "Settings associated with the destination. Will vary based on the type of destination" } }, - "documentation": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to FILE_GROUP_SETTINGS." + "documentation": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package." }, "FileSourceConvert608To708": { "type": "string", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.", "enum": [ "UPCONVERT", "DISABLED" @@ -5372,7 +5372,7 @@ "Convert608To708": { "shape": "FileSourceConvert608To708", "locationName": "convert608To708", - "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. 
If you choose Upconvert (UPCONVERT), MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." + "documentation": "Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708." }, "ConvertPaintToPop": { "shape": "CaptionSourceConvertPaintOnToPopOn", @@ -5382,7 +5382,7 @@ "Framerate": { "shape": "CaptionSourceFramerate", "locationName": "framerate", - "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. When you work directly in your JSON job specification, use the settings framerateNumerator and framerateDenominator. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." + "documentation": "Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps." }, "SourceFile": { "shape": "__stringMin14PatternS3SccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMIVttVTTWebvttWEBVTTHttpsSccSCCTtmlTTMLDfxpDFXPStlSTLSrtSRTXmlXMLSmiSMIVttVTTWebvttWEBVTT", @@ -5392,19 +5392,19 @@ "TimeDelta": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "timeDelta", - "documentation": "Optional. 
Use this setting when you need to adjust the sync between your sidecar captions and your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/time-delta-use-cases.html. Enter a positive or negative number to modify the times in the captions file. For example, type 15 to add 15 seconds to all the times in the captions file. Type -5 to subtract 5 seconds from the times in the captions file. You can optionally specify your time delta in milliseconds instead of seconds. When you do so, set the related setting, Time delta units (TimeDeltaUnits) to Milliseconds (MILLISECONDS). Note that, when you specify a time delta for timecode-based caption sources, such as SCC and STL, and your time delta isn't a multiple of the input frame rate, MediaConvert snaps the captions to the nearest frame. For example, when your input video frame rate is 25 fps and you specify 1010ms for time delta, MediaConvert delays your captions by 1000 ms." + "documentation": "Optional. Use this setting when you need to adjust the sync between your sidecar captions and your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/time-delta-use-cases.html. Enter a positive or negative number to modify the times in the captions file. For example, type 15 to add 15 seconds to all the times in the captions file. Type -5 to subtract 5 seconds from the times in the captions file. You can optionally specify your time delta in milliseconds instead of seconds. When you do so, set the related setting, Time delta units to Milliseconds. Note that, when you specify a time delta for timecode-based caption sources, such as SCC and STL, and your time delta isn't a multiple of the input frame rate, MediaConvert snaps the captions to the nearest frame. For example, when your input video frame rate is 25 fps and you specify 1010ms for time delta, MediaConvert delays your captions by 1000 ms." 
}, "TimeDeltaUnits": { "shape": "FileSourceTimeDeltaUnits", "locationName": "timeDeltaUnits", - "documentation": "When you use the setting Time delta (TimeDelta) to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units (TimeDeltaUnits), MediaConvert uses seconds by default." + "documentation": "When you use the setting Time delta to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units, MediaConvert uses seconds by default." } }, "documentation": "If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1 in an xml file, specify the URI of the input caption source file. If your caption source is IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings." }, "FileSourceTimeDeltaUnits": { "type": "string", - "documentation": "When you use the setting Time delta (TimeDelta) to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units (TimeDeltaUnits), MediaConvert uses seconds by default.", + "documentation": "When you use the setting Time delta to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units, MediaConvert uses seconds by default.", "enum": [ "SECONDS", "MILLISECONDS" @@ -5473,7 +5473,7 @@ "documentation": "JPEG Quality - a higher value equals higher quality." } }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value FRAME_CAPTURE." + "documentation": "Required when you set Codec to the value FRAME_CAPTURE." 
}, "GetJobRequest": { "type": "structure", @@ -5588,7 +5588,7 @@ }, "H264AdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.", + "documentation": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization.", "enum": [ "OFF", "AUTO", @@ -5601,7 +5601,7 @@ }, "H264CodecLevel": { "type": "string", - "documentation": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto (AUTO).", + "documentation": "Specify an H.264 level that is consistent with your output video settings. 
If you aren't sure what level to specify, choose Auto.", "enum": [ "AUTO", "LEVEL_1", @@ -5636,7 +5636,7 @@ }, "H264DynamicSubGop": { "type": "string", - "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", "enum": [ "ADAPTIVE", "STATIC" @@ -5652,7 +5652,7 @@ }, "H264FieldEncoding": { "type": "string", - "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs.", + "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field to disable PAFF encoding and create separate interlaced fields. 
Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs.", "enum": [ "PAFF", "FORCE_FIELD", @@ -5661,7 +5661,7 @@ }, "H264FlickerAdaptiveQuantization": { "type": "string", - "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled. Change this value to Enabled to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. 
When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO.", "enum": [ "DISABLED", "ENABLED" @@ -5669,7 +5669,7 @@ }, "H264FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -5694,7 +5694,7 @@ }, "H264GopSizeUnits": { "type": "string", - "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. 
To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize).", + "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size.", "enum": [ "FRAMES", "SECONDS", @@ -5703,7 +5703,7 @@ }, "H264InterlaceMode": { "type": "string", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type.
If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -5714,7 +5714,7 @@ }, "H264ParControl": { "type": "string", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. 
When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -5740,7 +5740,7 @@ "QvbrQualityLevel": { "shape": "__integerMin1Max10", "locationName": "qvbrQualityLevel", - "documentation": "Use this setting only when you set Rate control mode (RateControlMode) to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." + "documentation": "Use this setting only when you set Rate control mode to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. 
For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." }, "QvbrQualityLevelFineTune": { "shape": "__doubleMin0Max1", @@ -5748,7 +5748,7 @@ "documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33." } }, - "documentation": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the H.264 codec. Use these settings only when you set QVBR for Rate control mode." }, "H264RateControlMode": { "type": "string", @@ -5769,7 +5769,7 @@ }, "H264ScanTypeConversionMode": { "type": "string", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). 
You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "enum": [ "INTERLACED", "INTERLACED_OPTIMIZE" @@ -5777,7 +5777,7 @@ }, "H264SceneChangeDetect": { "type": "string", - "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", + "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. 
For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", "enum": [ "DISABLED", "ENABLED", @@ -5790,7 +5790,7 @@ "AdaptiveQuantization": { "shape": "H264AdaptiveQuantization", "locationName": "adaptiveQuantization", - "documentation": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization (H264AdaptiveQuantization) to Off (OFF). Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization." + "documentation": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set H264AdaptiveQuantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. Related settings: The value that you choose here applies to the following settings: H264FlickerAdaptiveQuantization, H264SpatialAdaptiveQuantization, and H264TemporalAdaptiveQuantization." }, "BandwidthReductionFilter": { "shape": "BandwidthReductionFilter", @@ -5805,7 +5805,7 @@ "CodecLevel": { "shape": "H264CodecLevel", "locationName": "codecLevel", - "documentation": "Specify an H.264 level that is consistent with your output video settings. 
If you aren't sure what level to specify, choose Auto (AUTO)." + "documentation": "Specify an H.264 level that is consistent with your output video settings. If you aren't sure what level to specify, choose Auto." }, "CodecProfile": { "shape": "H264CodecProfile", @@ -5825,17 +5825,17 @@ "FieldEncoding": { "shape": "H264FieldEncoding", "locationName": "fieldEncoding", - "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs." + "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs." }, "FlickerAdaptiveQuantization": { "shape": "H264FlickerAdaptiveQuantization", "locationName": "flickerAdaptiveQuantization", - "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled (DISABLED). Change this value to Enabled (ENABLED) to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. 
When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264FlickerAdaptiveQuantization is Disabled. Change this value to Enabled to reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. To manually enable or disable H264FlickerAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO." }, "FramerateControl": { "shape": "H264FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. 
Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "H264FramerateConversionAlgorithm", @@ -5860,17 +5860,17 @@ "GopClosedCadence": { "shape": "__integerMin0Max2147483647", "locationName": "gopClosedCadence", - "documentation": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0." + "documentation": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. In the console, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0." 
}, "GopSize": { "shape": "__doubleMin0", "locationName": "gopSize", - "documentation": "Use this setting only when you set GOP mode control (GopSizeUnits) to Specified, frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control (GopSizeUnits). If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto (AUTO). If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group." + "documentation": "Use this setting only when you set GOP mode control to Specified, frames or Specified, seconds. Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control. If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto. If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group." }, "GopSizeUnits": { "shape": "H264GopSizeUnits", "locationName": "gopSizeUnits", - "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. 
To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize)." + "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size." }, "HrdBufferFinalFillPercentage": { "shape": "__integerMin0Max100", @@ -5890,7 +5890,7 @@ "InterlaceMode": { "shape": "H264InterlaceMode", "locationName": "interlaceMode", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "documentation": "Choose the scan line type for the output.
Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." }, "MaxBitrate": { "shape": "__integerMin1000Max1152000000", @@ -5900,7 +5900,7 @@ "MinIInterval": { "shape": "__integerMin0Max30", "locationName": "minIInterval", - "documentation": "Use this setting only when you also enable Scene change detection (SceneChangeDetect). This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval (minIInterval) to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. 
When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." + "documentation": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." }, "NumberBFramesBetweenReferenceFrames": { "shape": "__integerMin0Max7", @@ -5915,17 +5915,17 @@ "ParControl": { "shape": "H264ParControl", "locationName": "parControl", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. 
When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." 
+ "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "H264QualityTuningLevel", @@ -5935,7 +5935,7 @@ "QvbrSettings": { "shape": "H264QvbrSettings", "locationName": "qvbrSettings", - "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." }, "RateControlMode": { "shape": "H264RateControlMode", @@ -5950,12 +5950,12 @@ "ScanTypeConversionMode": { "shape": "H264ScanTypeConversionMode", "locationName": "scanTypeConversionMode", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). 
You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." }, "SceneChangeDetect": { "shape": "H264SceneChangeDetect", "locationName": "sceneChangeDetect", - "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." + "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." 
}, "Slices": { "shape": "__integerMin1Max32", @@ -5965,17 +5965,17 @@ "SlowPal": { "shape": "H264SlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." }, "Softness": { "shape": "__integerMin0Max128", "locationName": "softness", - "documentation": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). The Softness (softness) setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matricies from the H.264 specification. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video." 
+ "documentation": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness or by enabling a noise reducer filter. The Softness setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matrices from the H.264 specification. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video." }, "SpatialAdaptiveQuantization": { "shape": "H264SpatialAdaptiveQuantization", "locationName": "spatialAdaptiveQuantization", - "documentation": "Only use this setting when you change the default value, Auto (AUTO), for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be.
If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (H264AdaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." + "documentation": "Only use this setting when you change the default value, Auto, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled. 
Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO." }, "Syntax": { "shape": "H264Syntax", @@ -5985,12 +5985,12 @@ "Telecine": { "shape": "H264Telecine", "locationName": "telecine", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." }, "TemporalAdaptiveQuantization": { "shape": "H264TemporalAdaptiveQuantization", "locationName": "temporalAdaptiveQuantization", - "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. 
When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization). To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO." + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled. 
Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization. To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO." }, "UnregisteredSeiTimecode": { "shape": "H264UnregisteredSeiTimecode", @@ -5998,11 +5998,11 @@ "documentation": "Inserts timecode for each frame as 4 bytes of an unregistered SEI message." } }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value H_264." + "documentation": "Required when you set Codec to the value H_264." }, "H264SlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. 
In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -6010,7 +6010,7 @@ }, "H264SpatialAdaptiveQuantization": { "type": "string", - "documentation": "Only use this setting when you change the default value, Auto (AUTO), for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled (DISABLED). 
Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (H264AdaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", + "documentation": "Only use this setting when you change the default value, Auto, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264SpatialAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to set H264SpatialAdaptiveQuantization to Disabled. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. 
For content with a wider variety of textures, set it to High or Higher. To manually enable or disable H264SpatialAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO.", "enum": [ "DISABLED", "ENABLED" @@ -6026,7 +6026,7 @@ }, "H264Telecine": { "type": "string", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "enum": [ "NONE", "SOFT", @@ -6035,7 +6035,7 @@ }, "H264TemporalAdaptiveQuantization": { "type": "string", - "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. 
When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled (ENABLED). Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled (DISABLED). Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization). To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization (H264AdaptiveQuantization) to a value other than AUTO.", + "documentation": "Only use this setting when you change the default value, AUTO, for the setting H264AdaptiveQuantization. When you keep all defaults, excluding H264AdaptiveQuantization and all other adaptive quantization from your JSON job specification, MediaConvert automatically applies the best types of quantization for your video content. When you set H264AdaptiveQuantization to a value other than AUTO, the default value for H264TemporalAdaptiveQuantization is Enabled. Keep this default value to adjust quantization within each frame based on temporal variation of content complexity. 
When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to set H264TemporalAdaptiveQuantization to Disabled. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization. To manually enable or disable H264TemporalAdaptiveQuantization, you must set Adaptive quantization to a value other than AUTO.", "enum": [ "DISABLED", "ENABLED" @@ -6051,7 +6051,7 @@ }, "H265AdaptiveQuantization": { "type": "string", - "documentation": "When you set Adaptive Quantization (H265AdaptiveQuantization) to Auto (AUTO), or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low (LOW), Medium (MEDIUM), High (HIGH), Higher (HIGHER), or Max (MAX) to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization (H265SpatialAdaptiveQuantization), Temporal Adaptive Quantization (H265TemporalAdaptiveQuantization), and Flicker Adaptive Quantization (H265FlickerAdaptiveQuantization), to further control the quantization filter. Set Adaptive Quantization to Off (OFF) to apply no quantization to your output.", + "documentation": "When you set Adaptive Quantization to Auto, or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. 
Set Adaptive Quantization to Low, Medium, High, Higher, or Max to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization, Temporal Adaptive Quantization, and Flicker Adaptive Quantization, to further control the quantization filter. Set Adaptive Quantization to Off to apply no quantization to your output.", "enum": [ "OFF", "LOW", @@ -6106,7 +6106,7 @@ }, "H265DynamicSubGop": { "type": "string", - "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", "enum": [ "ADAPTIVE", "STATIC" @@ -6114,7 +6114,7 @@ }, "H265FlickerAdaptiveQuantization": { "type": "string", - "documentation": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. 
Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off (OFF).", + "documentation": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off.", "enum": [ "DISABLED", "ENABLED" @@ -6122,7 +6122,7 @@ }, "H265FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. 
If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -6147,7 +6147,7 @@ }, "H265GopSizeUnits": { "type": "string", - "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize).", + "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size.", "enum": [ "FRAMES", "SECONDS", @@ -6156,7 +6156,7 @@ }, "H265InterlaceMode": { "type": "string", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. 
Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -6167,7 +6167,7 @@ }, "H265ParControl": { "type": "string", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. 
To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -6175,7 +6175,7 @@ }, "H265QualityTuningLevel": { "type": "string", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "SINGLE_PASS_HQ", @@ -6193,7 +6193,7 @@ "QvbrQualityLevel": { "shape": "__integerMin1Max10", "locationName": "qvbrQualityLevel", - "documentation": "Use this setting only when you set Rate control mode (RateControlMode) to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. 
Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." + "documentation": "Use this setting only when you set Rate control mode to QVBR. Specify the target quality level for this output. MediaConvert determines the right number of bits to use for each part of the video to maintain the video quality that you specify. When you keep the default value, AUTO, MediaConvert picks a quality level for you, based on characteristics of your input video. If you prefer to specify a quality level, specify a number from 1 through 10. Use higher numbers for greater quality. Level 10 results in nearly lossless compression. The quality level for most broadcast-quality transcodes is between 6 and 9. Optionally, to specify a value between whole numbers, also provide a value for the setting qvbrQualityLevelFineTune. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33." }, "QvbrQualityLevelFineTune": { "shape": "__doubleMin0Max1", @@ -6201,7 +6201,7 @@ "documentation": "Optional. Specify a value here to set the QVBR quality to a level that is between whole numbers. For example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. MediaConvert rounds your QVBR quality level to the nearest third of a whole number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune to .25, your actual QVBR quality level is 7.33." } }, - "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." + "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. 
Use these settings only when you set QVBR for Rate control mode." }, "H265RateControlMode": { "type": "string", @@ -6223,7 +6223,7 @@ }, "H265ScanTypeConversionMode": { "type": "string", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. 
You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "enum": [ "INTERLACED", "INTERLACED_OPTIMIZE" @@ -6231,7 +6231,7 @@ }, "H265SceneChangeDetect": { "type": "string", - "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", + "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr.", "enum": [ "DISABLED", "ENABLED", @@ -6244,7 +6244,7 @@ "AdaptiveQuantization": { "shape": "H265AdaptiveQuantization", "locationName": "adaptiveQuantization", - "documentation": "When you set Adaptive Quantization (H265AdaptiveQuantization) to Auto (AUTO), or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low (LOW), Medium (MEDIUM), High (HIGH), Higher (HIGHER), or Max (MAX) to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization (H265SpatialAdaptiveQuantization), Temporal Adaptive Quantization (H265TemporalAdaptiveQuantization), and Flicker Adaptive Quantization (H265FlickerAdaptiveQuantization), to further control the quantization filter. Set Adaptive Quantization to Off (OFF) to apply no quantization to your output." 
+ "documentation": "When you set Adaptive Quantization to Auto, or leave blank, MediaConvert automatically applies quantization to improve the video quality of your output. Set Adaptive Quantization to Low, Medium, High, Higher, or Max to manually control the strength of the quantization filter. When you do, you can specify a value for Spatial Adaptive Quantization, Temporal Adaptive Quantization, and Flicker Adaptive Quantization, to further control the quantization filter. Set Adaptive Quantization to Off to apply no quantization to your output." }, "AlternateTransferFunctionSei": { "shape": "H265AlternateTransferFunctionSei", @@ -6279,12 +6279,12 @@ "FlickerAdaptiveQuantization": { "shape": "H265FlickerAdaptiveQuantization", "locationName": "flickerAdaptiveQuantization", - "documentation": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off (OFF)." + "documentation": "Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set adaptiveQuantization to a value other than Off." 
}, "FramerateControl": { "shape": "H265FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "H265FramerateConversionAlgorithm", @@ -6309,17 +6309,17 @@ "GopClosedCadence": { "shape": "__integerMin0Max2147483647", "locationName": "gopClosedCadence", - "documentation": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. 
To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0." + "documentation": "Specify the relative frequency of open to closed GOPs in this output. For example, if you want to allow four open GOPs and then require a closed GOP, set this value to 5. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, do this by keeping the default empty value. If you do explicitly specify a value, for segmented outputs, don't set this value to 0." }, "GopSize": { "shape": "__doubleMin0", "locationName": "gopSize", - "documentation": "Use this setting only when you set GOP mode control (GopSizeUnits) to Specified, frames (FRAMES) or Specified, seconds (SECONDS). Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control (GopSizeUnits). If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto (AUTO). If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group." + "documentation": "Use this setting only when you set GOP mode control to Specified, frames or Specified, seconds. Specify the GOP length using a whole number of frames or a decimal value of seconds. MediaConvert will interpret this value as frames or seconds depending on the value you choose for GOP mode control. If you want to allow MediaConvert to automatically determine GOP size, leave GOP size blank and set GOP mode control to Auto. 
If your output group specifies HLS, DASH, or CMAF, leave GOP size blank and set GOP mode control to Auto in each output in your output group." }, "GopSizeUnits": { "shape": "H265GopSizeUnits", "locationName": "gopSizeUnits", - "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto (AUTO) and and leave GOP size (GopSize) blank. By default, if you don't specify GOP mode control (GopSizeUnits), MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames (FRAMES) or Specified, seconds (SECONDS) and then provide the GOP length in the related setting GOP size (GopSize)." + "documentation": "Specify how the transcoder determines GOP size for this output. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, choose Auto and and leave GOP size blank. By default, if you don't specify GOP mode control, MediaConvert will use automatic behavior. If your output group specifies HLS, DASH, or CMAF, set GOP mode control to Auto and leave GOP size blank in each output in your output group. To explicitly specify the GOP length, choose Specified, frames or Specified, seconds and then provide the GOP length in the related setting GOP size." }, "HrdBufferFinalFillPercentage": { "shape": "__integerMin0Max100", @@ -6339,7 +6339,7 @@ "InterlaceMode": { "shape": "H265InterlaceMode", "locationName": "interlaceMode", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. 
Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." }, "MaxBitrate": { "shape": "__integerMin1000Max1466400000", @@ -6349,7 +6349,7 @@ "MinIInterval": { "shape": "__integerMin0Max30", "locationName": "minIInterval", - "documentation": "Use this setting only when you also enable Scene change detection (SceneChangeDetect). This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. 
We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, keep the default value by leaving this setting out of your JSON job specification. In the console, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval (minIInterval) to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." + "documentation": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. We recommend that you have the transcoder automatically choose this value for you based on characteristics of your input video. To enable this automatic behavior, do this by keeping the default empty value. When you explicitly specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. 
When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." }, "NumberBFramesBetweenReferenceFrames": { "shape": "__integerMin0Max7", @@ -6364,27 +6364,27 @@ "ParControl": { "shape": "H265ParControl", "locationName": "parControl", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. 
When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "H265QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." }, "QvbrSettings": { "shape": "H265QvbrSettings", "locationName": "qvbrSettings", - "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode (RateControlMode)." 
+ "documentation": "Settings for quality-defined variable bitrate encoding with the H.265 codec. Use these settings only when you set QVBR for Rate control mode." }, "RateControlMode": { "shape": "H265RateControlMode", @@ -6399,12 +6399,12 @@ "ScanTypeConversionMode": { "shape": "H265ScanTypeConversionMode", "locationName": "scanTypeConversionMode", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. 
When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." }, "SceneChangeDetect": { "shape": "H265SceneChangeDetect", "locationName": "sceneChangeDetect", - "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." + "documentation": "Enable this setting to insert I-frames at scene changes that the service automatically detects. This improves video quality and is enabled by default. If this output uses QVBR, choose Transition detection for further video quality improvement. For more information about QVBR, see https://docs.aws.amazon.com/console/mediaconvert/cbr-vbr-qvbr." }, "Slices": { "shape": "__integerMin1Max32", @@ -6414,22 +6414,22 @@ "SlowPal": { "shape": "H265SlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." 
+ "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." }, "SpatialAdaptiveQuantization": { "shape": "H265SpatialAdaptiveQuantization", "locationName": "spatialAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. 
When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." }, "Telecine": { "shape": "H265Telecine", "locationName": "telecine", - "documentation": "This field applies only if the Streams > Advanced > Framerate (framerate) field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced Mode field (interlace_mode) to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i." + "documentation": "This field applies only if the Streams > Advanced > Framerate field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field and the Streams > Advanced > Interlaced Mode field to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i." 
}, "TemporalAdaptiveQuantization": { "shape": "H265TemporalAdaptiveQuantization", "locationName": "temporalAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization)." + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. 
Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization." }, "TemporalIds": { "shape": "H265TemporalIds", @@ -6456,7 +6456,7 @@ }, "H265SlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -6464,7 +6464,7 @@ }, "H265SpatialAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. 
Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", "enum": [ "DISABLED", "ENABLED" @@ -6472,7 +6472,7 @@ }, "H265Telecine": { "type": "string", - "documentation": "This field applies only if the Streams > Advanced > Framerate (framerate) field is set to 29.970. 
This field works with the Streams > Advanced > Preprocessors > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced Mode field (interlace_mode) to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i.", + "documentation": "This field applies only if the Streams > Advanced > Framerate field is set to 29.970. This field works with the Streams > Advanced > Preprocessors > Deinterlacer field and the Streams > Advanced > Interlaced Mode field to identify the scan type for the output: Progressive, Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output from 23.976 input. - Soft: produces 23.976; the player converts this output to 29.97i.", "enum": [ "NONE", "SOFT", @@ -6481,7 +6481,7 @@ }, "H265TemporalAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. 
Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization).", + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization.", "enum": [ "DISABLED", "ENABLED" @@ -6635,7 +6635,7 @@ }, "HlsAudioOnlyContainer": { "type": "string", - "documentation": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic (AUTOMATIC) to create a raw audio-only file with no container. Regardless of the value that you specify here, if this output has video, the service will place outputs into an MPEG2-TS container.", + "documentation": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic to create a raw audio-only file with no container. 
Regardless of the value that you specify here, if this output has video, the service will place outputs into an MPEG2-TS container.", "enum": [ "AUTOMATIC", "M2TS" @@ -6643,7 +6643,7 @@ }, "HlsAudioOnlyHeader": { "type": "string", - "documentation": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include (INCLUDE), to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only headers from your audio segments.", + "documentation": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include, to output audio-only headers. Choose Exclude to remove the audio-only headers from your audio segments.", "enum": [ "INCLUDE", "EXCLUDE" @@ -6696,7 +6696,7 @@ }, "HlsCaptionSegmentLengthControl": { "type": "string", - "documentation": "Set Caption segment length control (CaptionSegmentLengthControl) to Match video (MATCH_VIDEO) to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) to create caption segments that are 300 seconds long.", + "documentation": "Set Caption segment length control to Match video to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments to create caption segments that are 300 seconds long.", "enum": [ "LARGE_SEGMENTS", "MATCH_VIDEO" @@ -6704,7 +6704,7 @@ }, "HlsClientCache": { "type": "string", - "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. 
Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header.", + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header.", "enum": [ "DISABLED", "ENABLED" @@ -6720,7 +6720,7 @@ }, "HlsDescriptiveVideoServiceFlag": { "type": "string", - "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", + "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation.", "enum": [ "DONT_FLAG", "FLAG" @@ -6799,7 +6799,7 @@ "AudioOnlyHeader": { "shape": "HlsAudioOnlyHeader", "locationName": "audioOnlyHeader", - "documentation": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include (INCLUDE), to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only headers from your audio segments." 
+ "documentation": "Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include, to output audio-only headers. Choose Exclude to remove the audio-only headers from your audio segments." }, "BaseUrl": { "shape": "__string", @@ -6819,12 +6819,12 @@ "CaptionSegmentLengthControl": { "shape": "HlsCaptionSegmentLengthControl", "locationName": "captionSegmentLengthControl", - "documentation": "Set Caption segment length control (CaptionSegmentLengthControl) to Match video (MATCH_VIDEO) to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments (LARGE_SEGMENTS) to create caption segments that are 300 seconds long." + "documentation": "Set Caption segment length control to Match video to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments to create caption segments that are 300 seconds long." }, "ClientCache": { "shape": "HlsClientCache", "locationName": "clientCache", - "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled (ENABLED) and control caching in your video distribution set up. For example, use the Cache-Control http header." + "documentation": "Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header." 
}, "CodecSpecification": { "shape": "HlsCodecSpecification", @@ -6834,7 +6834,7 @@ "Destination": { "shape": "__stringPatternS3", "locationName": "destination", - "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + "documentation": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." }, "DestinationSettings": { "shape": "DestinationSettings", @@ -6854,7 +6854,7 @@ "ImageBasedTrickPlay": { "shape": "HlsImageBasedTrickPlay", "locationName": "imageBasedTrickPlay", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. 
Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" }, "ImageBasedTrickPlaySettings": { "shape": "HlsImageBasedTrickPlaySettings", @@ -6909,12 +6909,12 @@ "SegmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", - "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (HlsSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." + "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." }, "SegmentLengthControl": { "shape": "HlsSegmentLengthControl", "locationName": "segmentLengthControl", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. 
Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." }, "SegmentsPerSubdirectory": { "shape": "__integerMin1Max2147483647", @@ -6934,12 +6934,12 @@ "TimedMetadataId3Frame": { "shape": "HlsTimedMetadataId3Frame", "locationName": "timedMetadataId3Frame", - "documentation": "Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 timestamps (timedMetadataId3Period) in your output. To include ID3 timestamps: Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame type to None (NONE)." + "documentation": "Specify the type of the ID3 frame to use for ID3 timestamps in your output. To include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to Passthrough. To exclude ID3 timestamps: Set ID3 timestamp frame type to None." }, "TimedMetadataId3Period": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "timedMetadataId3Period", - "documentation": "Specify the interval in seconds to write ID3 timestamps in your output. The first timestamp starts at the output timecode and date, and increases incrementally with each ID3 timestamp. To use the default interval of 10 seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp frame type (timedMetadataId3Frame) to PRIV (PRIV) or TDRL (TDRL), and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH)." + "documentation": "Specify the interval in seconds to write ID3 timestamps in your output. 
The first timestamp starts at the output timecode and date, and increases incrementally with each ID3 timestamp. To use the default interval of 10 seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp frame type to PRIV or TDRL, and set ID3 metadata to Passthrough." }, "TimestampDeltaMilliseconds": { "shape": "__integerMinNegative2147483648Max2147483647", @@ -6947,11 +6947,11 @@ "documentation": "Provides an extra millisecond delta offset to fine tune the timestamps." } }, - "documentation": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to HLS_GROUP_SETTINGS." + "documentation": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "HlsIFrameOnlyManifest": { "type": "string", - "documentation": "Choose Include (INCLUDE) to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE).", + "documentation": "Choose Include to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. 
When you don't need the I-frame only child manifest, keep the default value Exclude.", "enum": [ "INCLUDE", "EXCLUDE" @@ -6959,7 +6959,7 @@ }, "HlsImageBasedTrickPlay": { "type": "string", - "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", + "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md", "enum": [ "NONE", "THUMBNAIL", @@ -7106,7 +7106,7 @@ }, "HlsSegmentLengthControl": { "type": "string", - "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. 
Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "enum": [ "EXACT", "GOP_MULTIPLE" @@ -7123,7 +7123,7 @@ "AudioOnlyContainer": { "shape": "HlsAudioOnlyContainer", "locationName": "audioOnlyContainer", - "documentation": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic (AUTOMATIC) to create an audio-only file in a raw container. Regardless of the value that you specify here, if this output has video, the service will place the output into an MPEG2-TS container." + "documentation": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic to create an audio-only file in a raw container. Regardless of the value that you specify here, if this output has video, the service will place the output into an MPEG2-TS container." }, "AudioRenditionSets": { "shape": "__string", @@ -7138,12 +7138,12 @@ "DescriptiveVideoServiceFlag": { "shape": "HlsDescriptiveVideoServiceFlag", "locationName": "descriptiveVideoServiceFlag", - "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag (FLAG), MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag (DONT_FLAG), MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. 
For more information, see the Apple documentation." + "documentation": "Specify whether to flag this audio track as descriptive video service (DVS) in your HLS parent manifest. When you choose Flag, MediaConvert includes the parameter CHARACTERISTICS=\"public.accessibility.describes-video\" in the EXT-X-MEDIA entry for this track. When you keep the default choice, Don't flag, MediaConvert leaves this parameter out. The DVS flag can help with accessibility on Apple devices. For more information, see the Apple documentation." }, "IFrameOnlyManifest": { "shape": "HlsIFrameOnlyManifest", "locationName": "iFrameOnlyManifest", - "documentation": "Choose Include (INCLUDE) to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude (EXCLUDE)." + "documentation": "Choose Include to have MediaConvert generate a child manifest that lists only the I-frames for this rendition, in addition to your regular manifest for this rendition. You might use this manifest as part of a workflow that creates preview functions for your video. MediaConvert adds both the I-frame only child manifest and the regular child manifest to the parent manifest. When you don't need the I-frame only child manifest, keep the default value Exclude." }, "SegmentModifier": { "shape": "__string", @@ -7171,7 +7171,7 @@ }, "HlsTimedMetadataId3Frame": { "type": "string", - "documentation": "Specify the type of the ID3 frame (timedMetadataId3Frame) to use for ID3 timestamps (timedMetadataId3Period) in your output. 
To include ID3 timestamps: Specify PRIV (PRIV) or TDRL (TDRL) and set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). To exclude ID3 timestamps: Set ID3 timestamp frame type to None (NONE).", + "documentation": "Specify the type of the ID3 frame to use for ID3 timestamps in your output. To include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to Passthrough. To exclude ID3 timestamps: Set ID3 timestamp frame type to None.", "enum": [ "NONE", "PRIV", @@ -7205,15 +7205,15 @@ "Id3": { "shape": "__stringPatternAZaZ0902", "locationName": "id3", - "documentation": "Use ID3 tag (Id3) to provide a fully formed ID3 tag in base64-encode format." + "documentation": "Use ID3 tag to provide a fully formed ID3 tag in base64-encode format." }, "Timecode": { "shape": "__stringPattern010920405090509092", "locationName": "timecode", - "documentation": "Provide a Timecode (TimeCode) in HH:MM:SS:FF or HH:MM:SS;FF format." + "documentation": "Provide a Timecode in HH:MM:SS:FF or HH:MM:SS;FF format." } }, - "documentation": "To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) to specify the base 64 encoded string and use Timecode (TimeCode) to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion (Id3Insertion)." + "documentation": "To insert ID3 tags in your output, specify two values. Use ID3 tag to specify the base 64 encoded string and use Timecode to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion." }, "ImageInserter": { "type": "structure", @@ -7253,7 +7253,7 @@ "documentation": "Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. This option is available only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions." 
} }, - "documentation": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to IMSC." + "documentation": "Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." }, "ImscStylePassthrough": { "type": "string", @@ -7279,12 +7279,12 @@ "AudioSelectorGroups": { "shape": "__mapOfAudioSelectorGroup", "locationName": "audioSelectorGroups", - "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab (AudioDescription). Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." + "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." }, "AudioSelectors": { "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." 
+ "documentation": "Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." }, "CaptionSelectors": { "shape": "__mapOfCaptionSelector", @@ -7294,12 +7294,12 @@ "Crop": { "shape": "Rectangle", "locationName": "crop", - "documentation": "Use Cropping selection (crop) to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection (crop)." + "documentation": "Use Cropping selection to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection." }, "DeblockFilter": { "shape": "InputDeblockFilter", "locationName": "deblockFilter", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." + "documentation": "Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." }, "DecryptionSettings": { "shape": "InputDecryptionSettings", @@ -7309,7 +7309,7 @@ "DenoiseFilter": { "shape": "InputDenoiseFilter", "locationName": "denoiseFilter", - "documentation": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." + "documentation": "Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." 
}, "DolbyVisionMetadataXml": { "shape": "__stringMin14PatternS3XmlXMLHttpsXmlXML", @@ -7319,7 +7319,7 @@ "FileInput": { "shape": "__stringPatternS3Https", "locationName": "fileInput", - "documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* (SupplementalImps) to specify any supplemental IMPs that contain assets referenced by the CPL." + "documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL." }, "FilterEnable": { "shape": "InputFilterEnable", @@ -7339,27 +7339,27 @@ "InputClippings": { "shape": "__listOfInputClipping", "locationName": "inputClippings", - "documentation": "(InputClippings) contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." + "documentation": "Contains sets of start and end times that together specify a portion of the input to be used in the outputs. 
If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." }, "InputScanType": { "shape": "InputScanType", "locationName": "inputScanType", - "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto (AUTO). Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." + "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." }, "Position": { "shape": "Rectangle", "locationName": "position", - "documentation": "Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement (position). If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD (RespondToAfd) to Respond (RESPOND). 
If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior (scalingBehavior)." + "documentation": "Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement. If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD to Respond. If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior." }, "ProgramNumber": { "shape": "__integerMin1Max2147483647", "locationName": "programNumber", - "documentation": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default." + "documentation": "Use Program to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default." }, "PsiControl": { "shape": "InputPsiControl", "locationName": "psiControl", - "documentation": "Set PSI control (InputPsiControl) for transport stream inputs to specify which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI - Scan only PSI data." + "documentation": "Set PSI control for transport stream inputs to specify which data the demux process to scans.\n* Ignore PSI - Scan all PIDs for audio and video.\n* Use PSI - Scan only PSI data." 
}, "SupplementalImps": { "shape": "__listOf__stringPatternS3ASSETMAPXml", @@ -7369,12 +7369,12 @@ "TimecodeSource": { "shape": "InputTimecodeSource", "locationName": "timecodeSource", - "documentation": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + "documentation": "Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "TimecodeStart": { "shape": "__stringMin11Max11Pattern01D20305D205D", "locationName": "timecodeStart", - "documentation": "Specify the timecode that you want the service to use for this input's initial frame. 
To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + "documentation": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings, to Specified start. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "VideoGenerator": { "shape": "InputVideoGenerator", @@ -7395,19 +7395,19 @@ "EndTimecode": { "shape": "__stringPattern010920405090509092", "locationName": "endTimecode", - "documentation": "Set End timecode (EndTimecode) to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings (InputTimecodeSource). For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00." + "documentation": "Set End timecode to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings. 
For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00." }, "StartTimecode": { "shape": "__stringPattern010920405090509092", "locationName": "startTimecode", - "documentation": "Set Start timecode (StartTimecode) to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00." + "documentation": "Set Start timecode to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00." } }, "documentation": "To transcode only portions of your input, include one input clip for each part of your input that you want in your output. All input clips that you specify will be included in every output of the job. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html." }, "InputDeblockFilter": { "type": "string", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. 
Only manually controllable for MPEG2 and uncompressed video inputs.", + "documentation": "Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.", "enum": [ "ENABLED", "DISABLED" @@ -7441,7 +7441,7 @@ }, "InputDenoiseFilter": { "type": "string", - "documentation": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.", + "documentation": "Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.", "enum": [ "ENABLED", "DISABLED" @@ -7466,7 +7466,7 @@ }, "InputPsiControl": { "type": "string", - "documentation": "Set PSI control (InputPsiControl) for transport stream inputs to specify which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI - Scan only PSI data.", + "documentation": "Set PSI control for transport stream inputs to specify which data the demux process to scans.\n* Ignore PSI - Scan all PIDs for audio and video.\n* Use PSI - Scan only PSI data.", "enum": [ "IGNORE_PSI", "USE_PSI" @@ -7474,7 +7474,7 @@ }, "InputRotate": { "type": "string", - "documentation": "Use Rotate (InputRotate) to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. 
The service doesn't pass through rotation metadata.", + "documentation": "Use Rotate to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata.", "enum": [ "DEGREE_0", "DEGREES_90", @@ -7485,7 +7485,7 @@ }, "InputSampleRange": { "type": "string", - "documentation": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow (FOLLOW), for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata.", + "documentation": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow, for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. 
Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata.", "enum": [ "FOLLOW", "FULL_RANGE", @@ -7494,7 +7494,7 @@ }, "InputScanType": { "type": "string", - "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto (AUTO). Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts.", + "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts.", "enum": [ "AUTO", "PSF" @@ -7516,12 +7516,12 @@ "AudioSelectorGroups": { "shape": "__mapOfAudioSelectorGroup", "locationName": "audioSelectorGroups", - "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab (AudioDescription). Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." + "documentation": "Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. 
Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group." }, "AudioSelectors": { "shape": "__mapOfAudioSelector", "locationName": "audioSelectors", - "documentation": "Use Audio selectors (AudioSelectors) to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." + "documentation": "Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input." }, "CaptionSelectors": { "shape": "__mapOfCaptionSelector", @@ -7531,17 +7531,17 @@ "Crop": { "shape": "Rectangle", "locationName": "crop", - "documentation": "Use Cropping selection (crop) to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection (crop)." + "documentation": "Use Cropping selection to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection." }, "DeblockFilter": { "shape": "InputDeblockFilter", "locationName": "deblockFilter", - "documentation": "Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." + "documentation": "Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs." }, "DenoiseFilter": { "shape": "InputDenoiseFilter", "locationName": "denoiseFilter", - "documentation": "Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." 
+ "documentation": "Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs." }, "DolbyVisionMetadataXml": { "shape": "__stringMin14PatternS3XmlXMLHttpsXmlXML", @@ -7566,37 +7566,37 @@ "InputClippings": { "shape": "__listOfInputClipping", "locationName": "inputClippings", - "documentation": "(InputClippings) contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." + "documentation": "Contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them." }, "InputScanType": { "shape": "InputScanType", "locationName": "inputScanType", - "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto (AUTO). Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." 
+ "documentation": "When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts." }, "Position": { "shape": "Rectangle", "locationName": "position", - "documentation": "Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement (position). If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD (RespondToAfd) to Respond (RESPOND). If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior (scalingBehavior)." + "documentation": "Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement. If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD to Respond. If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior." }, "ProgramNumber": { "shape": "__integerMin1Max2147483647", "locationName": "programNumber", - "documentation": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. 
If the program you specify doesn't exist, the transcoding service will use this default." + "documentation": "Use Program to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default." }, "PsiControl": { "shape": "InputPsiControl", "locationName": "psiControl", - "documentation": "Set PSI control (InputPsiControl) for transport stream inputs to specify which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio and video. * Use PSI - Scan only PSI data." + "documentation": "Set PSI control for transport stream inputs to specify which data the demux process scans.\n* Ignore PSI - Scan all PIDs for audio and video.\n* Use PSI - Scan only PSI data." }, "TimecodeSource": { "shape": "InputTimecodeSource", "locationName": "timecodeSource", - "documentation": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + "documentation": "Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. 
This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "TimecodeStart": { "shape": "__stringMin11Max11Pattern01D20305D205D", "locationName": "timecodeStart", - "documentation": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings (InputTimecodeSource), to Specified start (SPECIFIEDSTART). For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." + "documentation": "Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings, to Specified start. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode." }, "VideoSelector": { "shape": "VideoSelector", @@ -7608,7 +7608,7 @@ }, "InputTimecodeSource": { "type": "string", - "documentation": "Use this Timecode source setting, located under the input settings (InputTimecodeSource), to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded (EMBEDDED) to use the timecodes in your input video. Choose Start at zero (ZEROBASED) to start the first frame at zero. 
Choose Specified start (SPECIFIEDSTART) to start the first frame at the timecode that you specify in the setting Start timecode (timecodeStart). If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", + "documentation": "Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.", "enum": [ "EMBEDDED", "ZEROBASED", @@ -7672,7 +7672,7 @@ "Opacity": { "shape": "__integerMin0Max100", "locationName": "opacity", - "documentation": "Use Opacity (Opacity) to specify how much of the underlying video shows through the inserted image. 0 is transparent and 100 is fully opaque. Default is 50." + "documentation": "Use Opacity to specify how much of the underlying video shows through the inserted image. 0 is transparent and 100 is fully opaque. Default is 50." }, "StartTime": { "shape": "__stringPattern01D20305D205D", @@ -7897,7 +7897,7 @@ "Inputs": { "shape": "__listOfInput", "locationName": "inputs", - "documentation": "Use Inputs (inputs) to define source file used in the transcode job. There can be multiple inputs add in a job. These inputs will be concantenated together to create the output." + "documentation": "Use Inputs to define source file used in the transcode job. 
There can be multiple inputs added in a job. These inputs will be concatenated together to create the output." }, "KantarWatermark": { "shape": "KantarWatermarkSettings", @@ -7912,7 +7912,7 @@ "NielsenConfiguration": { "shape": "NielsenConfiguration", "locationName": "nielsenConfiguration", - "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting." + "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration, MediaConvert enables PCM to ID3 tagging for all outputs in the job." }, "NielsenNonLinearWatermark": { "shape": "NielsenNonLinearWatermarkSettings", @@ -7922,7 +7922,7 @@ "OutputGroups": { "shape": "__listOfOutputGroup", "locationName": "outputGroups", - "documentation": "(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as follows. 
* FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" + "documentation": "Contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in is a group of settings that apply to the whole group. This required object depends on the value you set for Type. Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" }, "TimecodeConfig": { "shape": "TimecodeConfig", @@ -7932,7 +7932,7 @@ "TimedMetadataInsertion": { "shape": "TimedMetadataInsertion", "locationName": "timedMetadataInsertion", - "documentation": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH)." + "documentation": "Insert user-defined custom ID3 metadata at timecodes that you specify. In each output that you want to include this metadata, you must set ID3 metadata to Passthrough." } }, "documentation": "JobSettings contains all the transcode settings for a job." @@ -8058,7 +8058,7 @@ "Inputs": { "shape": "__listOfInputTemplate", "locationName": "inputs", - "documentation": "Use Inputs (inputs) to define the source file used in the transcode job. There can only be one input in a job template. Using the API, you can include multiple inputs when referencing a job template." + "documentation": "Use Inputs to define the source file used in the transcode job. There can only be one input in a job template. 
Using the API, you can include multiple inputs when referencing a job template." }, "KantarWatermark": { "shape": "KantarWatermarkSettings", @@ -8073,7 +8073,7 @@ "NielsenConfiguration": { "shape": "NielsenConfiguration", "locationName": "nielsenConfiguration", - "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting." + "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration, MediaConvert enables PCM to ID3 tagging for all outputs in the job." }, "NielsenNonLinearWatermark": { "shape": "NielsenNonLinearWatermarkSettings", @@ -8083,7 +8083,7 @@ "OutputGroups": { "shape": "__listOfOutputGroup", "locationName": "outputGroups", - "documentation": "(OutputGroups) contains one group of settings for each set of outputs that share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in (OutputGroups) is a group of settings that apply to the whole group. This required object depends on the value you set for (Type) under (OutputGroups)>(OutputGroupSettings). Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" + "documentation": "Contains one group of settings for each set of outputs that share a common package type. 
All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, MXF, and no container) are grouped in a single output group as well. Required in is a group of settings that apply to the whole group. This required object depends on the value you set for Type. Type, settings object pairs are as follows. * FILE_GROUP_SETTINGS, FileGroupSettings * HLS_GROUP_SETTINGS, HlsGroupSettings * DASH_ISO_GROUP_SETTINGS, DashIsoGroupSettings * MS_SMOOTH_GROUP_SETTINGS, MsSmoothGroupSettings * CMAF_GROUP_SETTINGS, CmafGroupSettings" }, "TimecodeConfig": { "shape": "TimecodeConfig", @@ -8093,7 +8093,7 @@ "TimedMetadataInsertion": { "shape": "TimedMetadataInsertion", "locationName": "timedMetadataInsertion", - "documentation": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH)." + "documentation": "Insert user-defined custom ID3 metadata at timecodes that you specify. In each output that you want to include this metadata, you must set ID3 metadata to Passthrough." } }, "documentation": "JobTemplateSettings contains all the transcode settings saved in the template that will be applied to jobs created from it." @@ -8595,7 +8595,7 @@ }, "M2tsAudioDuration": { "type": "string", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. 
After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "enum": [ "DEFAULT_CODEC_DURATION", "MATCH_VIDEO_DURATION" @@ -8611,7 +8611,7 @@ }, "M2tsDataPtsControl": { "type": "string", - "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). 
Keep the default value (AUTO) to allow all PTS values.", + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value to allow all PTS values.", "enum": [ "AUTO", "ALIGN_TO_VIDEO" @@ -8643,7 +8643,7 @@ }, "M2tsForceTsVideoEbpOrder": { "type": "string", - "documentation": "Keep the default value (DEFAULT) unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force (FORCE).", + "documentation": "Keep the default value unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force.", "enum": [ "FORCE", "DEFAULT" @@ -8690,11 +8690,11 @@ "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream generated by ESAM." } }, - "documentation": "Settings for SCTE-35 signals from ESAM. Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "documentation": "Settings for SCTE-35 signals from ESAM. Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." }, "M2tsScte35Source": { "type": "string", - "documentation": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). 
Also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam).", + "documentation": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None. Also provide the ESAM XML as a string in the setting Signal processing notification XML. Also enable ESAM SCTE-35 (include the property scte35Esam).", "enum": [ "PASSTHROUGH", "NONE" @@ -8731,7 +8731,7 @@ "AudioDuration": { "shape": "M2tsAudioDuration", "locationName": "audioDuration", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. 
In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", @@ -8756,17 +8756,17 @@ "DataPTSControl": { "shape": "M2tsDataPtsControl", "locationName": "dataPTSControl", - "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values." + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value to allow all PTS values." }, "DvbNitSettings": { "shape": "DvbNitSettings", "locationName": "dvbNitSettings", - "documentation": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." 
+ "documentation": "Use these settings to insert a DVB Network Information Table (NIT) in the transport stream of this output." }, "DvbSdtSettings": { "shape": "DvbSdtSettings", "locationName": "dvbSdtSettings", - "documentation": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Service Description Table (SDT) in the transport stream of this output." }, "DvbSubPids": { "shape": "__listOf__integerMin32Max8182", @@ -8776,7 +8776,7 @@ "DvbTdtSettings": { "shape": "DvbTdtSettings", "locationName": "dvbTdtSettings", - "documentation": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output. When you work directly in your JSON job specification, include this object only when your job has a transport stream output and the container settings contain the object M2tsSettings." + "documentation": "Use these settings to insert a DVB Time and Date Table (TDT) in the transport stream of this output." }, "DvbTeletextPid": { "shape": "__integerMin32Max8182", @@ -8801,7 +8801,7 @@ "ForceTsVideoEbpOrder": { "shape": "M2tsForceTsVideoEbpOrder", "locationName": "forceTsVideoEbpOrder", - "documentation": "Keep the default value (DEFAULT) unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force (FORCE)." + "documentation": "Keep the default value unless you know that your audio EBP markers are incorrectly appearing before your video EBP markers. To correct this problem, set this value to Force." 
}, "FragmentTime": { "shape": "__doubleMin0", @@ -8846,7 +8846,7 @@ "PcrPid": { "shape": "__integerMin32Max8182", "locationName": "pcrPid", - "documentation": "Specify the packet identifier (PID) for the program clock reference (PCR) in this output. If you do not specify a value, the service will use the value for Video PID (VideoPid)." + "documentation": "Specify the packet identifier (PID) for the program clock reference (PCR) in this output. If you do not specify a value, the service will use the value for Video PID." }, "PmtInterval": { "shape": "__integerMin0Max1000", @@ -8866,7 +8866,7 @@ "ProgramNumber": { "shape": "__integerMin0Max65535", "locationName": "programNumber", - "documentation": "Use Program number (programNumber) to specify the program number used in the program map table (PMT) for this output. Default is 1. Program numbers and program map tables are parts of MPEG-2 transport stream containers, used for organizing data." + "documentation": "Use Program number to specify the program number used in the program map table (PMT) for this output. Default is 1. Program numbers and program map tables are parts of MPEG-2 transport stream containers, used for organizing data." }, "RateMode": { "shape": "M2tsRateMode", @@ -8876,7 +8876,7 @@ "Scte35Esam": { "shape": "M2tsScte35Esam", "locationName": "scte35Esam", - "documentation": "Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "documentation": "Include this in your job settings to put SCTE-35 markers in your HLS and transport stream outputs at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." 
}, "Scte35Pid": { "shape": "__integerMin32Max8182", @@ -8886,7 +8886,7 @@ "Scte35Source": { "shape": "M2tsScte35Source", "locationName": "scte35Source", - "documentation": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE). Also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml). Also enable ESAM SCTE-35 (include the property scte35Esam)." + "documentation": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None. Also provide the ESAM XML as a string in the setting Signal processing notification XML. Also enable ESAM SCTE-35 (include the property scte35Esam)." }, "SegmentationMarkers": { "shape": "M2tsSegmentationMarkers", @@ -8919,11 +8919,11 @@ "documentation": "Specify the packet identifier (PID) of the elementary video stream in the transport stream." } }, - "documentation": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container (ContainerType) is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." 
+ "documentation": "MPEG-2 TS container settings. These apply to outputs in a File output group when the output's container is MPEG-2 Transport Stream (M2TS). In these assets, data is organized by the program map table (PMT). Each transport stream program contains subsets of data, including audio, video, and metadata. Each of these subsets of data has a numerical label called a packet identifier (PID). Each transport stream program corresponds to one MediaConvert output. The PMT lists the types of data in a program along with their PID. Downstream systems and players use the program map table to look up the PID for each type of data it accesses and then uses the PIDs to locate specific data within the asset." }, "M3u8AudioDuration": { "type": "string", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. 
For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "enum": [ "DEFAULT_CODEC_DURATION", "MATCH_VIDEO_DURATION" @@ -8931,7 +8931,7 @@ }, "M3u8DataPtsControl": { "type": "string", - "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values.", + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value AUTO to allow all PTS values.", "enum": [ "AUTO", "ALIGN_TO_VIDEO" @@ -8955,7 +8955,7 @@ }, "M3u8Scte35Source": { "type": "string", - "documentation": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you don't want manifest conditioning. 
Choose Passthrough (PASSTHROUGH) and choose Ad markers (adMarkers) if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml).", + "documentation": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None if you don't want manifest conditioning. Choose Passthrough and choose Ad markers if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML.", "enum": [ "PASSTHROUGH", "NONE" @@ -8967,7 +8967,7 @@ "AudioDuration": { "shape": "M3u8AudioDuration", "locationName": "audioDuration", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." 
+ "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, "AudioFramesPerPes": { "shape": "__integerMin0Max2147483647", @@ -8982,7 +8982,7 @@ "DataPTSControl": { "shape": "M3u8DataPtsControl", "locationName": "dataPTSControl", - "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values." + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value AUTO to allow all PTS values." 
}, "MaxPcrInterval": { "shape": "__integerMin0Max500", @@ -9037,12 +9037,12 @@ "Scte35Source": { "shape": "M3u8Scte35Source", "locationName": "scte35Source", - "documentation": "For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None (NONE) if you don't want manifest conditioning. Choose Passthrough (PASSTHROUGH) and choose Ad markers (adMarkers) if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML (sccXml)." + "documentation": "For SCTE-35 markers from your input-- Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want SCTE-35 markers in this output. For SCTE-35 markers from an ESAM XML document-- Choose None if you don't want manifest conditioning. Choose Passthrough and choose Ad markers if you do want manifest conditioning. In both cases, also provide the ESAM XML as a string in the setting Signal processing notification XML." }, "TimedMetadata": { "shape": "TimedMetadata", "locationName": "timedMetadata", - "documentation": "Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: set ID3 metadata to None (NONE) or leave blank." + "documentation": "Set ID3 metadata to Passthrough to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period, and Custom ID3 metadata inserter. To exclude this ID3 metadata in this output: set ID3 metadata to None or leave blank." 
}, "TimedMetadataPid": { "shape": "__integerMin32Max8182", @@ -9125,7 +9125,7 @@ "StartTime": { "shape": "__stringMin11Max11Pattern01D20305D205D", "locationName": "startTime", - "documentation": "Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF or HH:MM:SS;FF). Make sure that the timecode you provide here takes into account how you have set up your timecode configuration under both job settings and input settings. The simplest way to do that is to set both to start at 0. If you need to set up your job to follow timecodes embedded in your source that don't start at zero, make sure that you specify a start time that is after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html Find job-wide and input timecode configuration settings in your JSON job settings specification at settings>timecodeConfig>source and settings>inputs>timecodeSource." + "documentation": "Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF or HH:MM:SS;FF). Make sure that the timecode you provide here takes into account how you have set up your timecode configuration under both job settings and input settings. The simplest way to do that is to set both to start at 0. If you need to set up your job to follow timecodes embedded in your source that don't start at zero, make sure that you specify a start time that is after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html" } }, "documentation": "Overlay motion graphics on top of your video. The motion graphics that you specify here appear on all outputs in all output groups. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html." 
@@ -9260,7 +9260,7 @@ "Channels": { "shape": "__integerMin1Max2", "locationName": "channels", - "documentation": "Set Channels to specify the number of channels in this output audio track. Choosing Mono in the console will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2." + "documentation": "Set Channels to specify the number of channels in this output audio track. Choosing Mono will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2." }, "SampleRate": { "shape": "__integerMin32000Max48000", @@ -9268,7 +9268,7 @@ "documentation": "Sample rate in hz." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value MP2." + "documentation": "Required when you set Codec to the value MP2." }, "Mp3RateControlMode": { "type": "string", @@ -9289,7 +9289,7 @@ "Channels": { "shape": "__integerMin1Max2", "locationName": "channels", - "documentation": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2." + "documentation": "Specify the number of channels in this output audio track. Choosing Mono gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2." }, "RateControlMode": { "shape": "Mp3RateControlMode", @@ -9304,7 +9304,7 @@ "VbrQuality": { "shape": "__integerMin0Max9", "locationName": "vbrQuality", - "documentation": "Required when you set Bitrate control mode (rateControlMode) to VBR. Specify the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest quality)." + "documentation": "Required when you set Bitrate control mode to VBR. Specify the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest quality)." } }, "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3."
@@ -9339,7 +9339,7 @@ "AudioDuration": { "shape": "CmfcAudioDuration", "locationName": "audioDuration", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. 
When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, "CslgAtom": { "shape": "Mp4CslgAtom", @@ -9349,7 +9349,7 @@ "CttsVersion": { "shape": "__integerMin0Max1", "locationName": "cttsVersion", - "documentation": "Ignore this setting unless compliance to the CTTS box version specification matters in your workflow. Specify a value of 1 to set your CTTS box version to 1 and make your output compliant with the specification. When you specify a value of 1, you must also set CSLG atom (cslgAtom) to the value INCLUDE. Keep the default value 0 to set your CTTS box version to 0. This can provide backward compatibility for some players and packagers." + "documentation": "Ignore this setting unless compliance to the CTTS box version specification matters in your workflow. Specify a value of 1 to set your CTTS box version to 1 and make your output compliant with the specification. When you specify a value of 1, you must also set CSLG atom to the value INCLUDE. Keep the default value 0 to set your CTTS box version to 0. This can provide backward compatibility for some players and packagers." }, "FreeSpaceBox": { "shape": "Mp4FreeSpaceBox", @@ -9371,7 +9371,7 @@ }, "MpdAccessibilityCaptionHints": { "type": "string", - "documentation": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: ", + "documentation": "Optional. Choose Include to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. 
Keep the default value, Exclude, to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: ", "enum": [ "INCLUDE", "EXCLUDE" @@ -9379,7 +9379,7 @@ }, "MpdAudioDuration": { "type": "string", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. 
MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec.", "enum": [ "DEFAULT_CODEC_DURATION", "MATCH_VIDEO_DURATION" @@ -9387,7 +9387,7 @@ }, "MpdCaptionContainerType": { "type": "string", - "documentation": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw (RAW) for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files.", + "documentation": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files.", "enum": [ "RAW", "FRAGMENTED_MP4" @@ -9403,7 +9403,7 @@ }, "MpdManifestMetadataSignaling": { "type": "string", - "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. 
To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough.", + "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough.", "enum": [ "ENABLED", "DISABLED" @@ -9411,7 +9411,7 @@ }, "MpdScte35Esam": { "type": "string", - "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", + "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML.", "enum": [ "INSERT", "NONE" @@ -9419,7 +9419,7 @@ }, "MpdScte35Source": { "type": "string", - "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output.", + "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. 
Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output.", "enum": [ "PASSTHROUGH", "NONE" @@ -9431,17 +9431,17 @@ "AccessibilityCaptionHints": { "shape": "MpdAccessibilityCaptionHints", "locationName": "accessibilityCaptionHints", - "documentation": "Optional. Choose Include (INCLUDE) to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude (EXCLUDE), to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: " + "documentation": "Optional. Choose Include to have MediaConvert mark up your DASH manifest with elements for embedded 608 captions. This markup isn't generally required, but some video players require it to discover and play embedded 608 captions. Keep the default value, Exclude, to leave these elements out. When you enable this setting, this is the markup that MediaConvert includes in your manifest: " }, "AudioDuration": { "shape": "MpdAudioDuration", "locationName": "audioDuration", - "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration (MATCH_VIDEO_DURATION). In all other cases, keep the default value, Default codec duration (DEFAULT_CODEC_DURATION). When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. 
MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." + "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." }, "CaptionContainerType": { "shape": "MpdCaptionContainerType", "locationName": "captionContainerType", - "documentation": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw (RAW) for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files." + "documentation": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. 
You specify sidecar captions in a separate output from your audio and video. Choose Raw for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files." }, "KlvMetadata": { "shape": "MpdKlvMetadata", @@ -9451,44 +9451,44 @@ "ManifestMetadataSignaling": { "shape": "MpdManifestMetadataSignaling", "locationName": "manifestMetadataSignaling", - "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata (TimedMetadata) to Passthrough." + "documentation": "To add an InbandEventStream element in your output MPD manifest for each type of event message, set Manifest metadata signaling to Enabled. For ID3 event messages, the InbandEventStream element schemeIdUri will be same value that you specify for ID3 metadata scheme ID URI. For SCTE35 event messages, the InbandEventStream element schemeIdUri will be \"urn:scte:scte35:2013:bin\". To leave these elements out of your output MPD manifest, set Manifest metadata signaling to Disabled. To enable Manifest metadata signaling, you must also set SCTE-35 source to Passthrough, ESAM SCTE-35 to insert, or ID3 metadata to Passthrough." }, "Scte35Esam": { "shape": "MpdScte35Esam", "locationName": "scte35Esam", - "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. 
Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + "documentation": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML." }, "Scte35Source": { "shape": "MpdScte35Source", "locationName": "scte35Source", - "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output." + "documentation": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None if you don't want those SCTE-35 markers in this output." }, "TimedMetadata": { "shape": "MpdTimedMetadata", "locationName": "timedMetadata", - "documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank." + "documentation": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None or leave blank." 
}, "TimedMetadataBoxVersion": { "shape": "MpdTimedMetadataBoxVersion", "locationName": "timedMetadataBoxVersion", - "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough." + "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough." }, "TimedMetadataSchemeIdUri": { "shape": "__stringMax1000", "locationName": "timedMetadataSchemeIdUri", - "documentation": "Specify the event message box (eMSG) scheme ID URI (scheme_id_uri) for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata (timedMetadata) to Passthrough." + "documentation": "Specify the event message box (eMSG) scheme ID URI for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. Leave blank to use the default value: https://aomedia.org/emsg/ID3 When you specify a value for ID3 metadata scheme ID URI, you must also set ID3 metadata to Passthrough." }, "TimedMetadataValue": { "shape": "__stringMax1000", "locationName": "timedMetadataValue", - "documentation": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata (timedMetadata) to Passthrough." 
+ "documentation": "Specify the event message box (eMSG) value for ID3 timed metadata in your output. For more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.4 Semantics. When you specify a value for ID3 Metadata Value, you must also set ID3 metadata to Passthrough." } }, "documentation": "These settings relate to the fragmented MP4 container for the segments in your DASH outputs." }, "MpdTimedMetadata": { "type": "string", - "documentation": "To include ID3 metadata in this output: Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH). Specify this ID3 metadata in Custom ID3 metadata inserter (timedMetadataInsertion). MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. To exclude this ID3 metadata: Set ID3 metadata to None (NONE) or leave blank.", + "documentation": "To include ID3 metadata in this output: Set ID3 metadata to Passthrough. Specify this ID3 metadata in Custom ID3 metadata inserter. MediaConvert writes each instance of ID3 metadata in a separate Event Message (eMSG) box. 
To exclude this ID3 metadata: Set ID3 metadata to None or leave blank.", "enum": [ "PASSTHROUGH", "NONE" @@ -9496,7 +9496,7 @@ }, "MpdTimedMetadataBoxVersion": { "type": "string", - "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata (timedMetadata) to Passthrough.", + "documentation": "Specify the event message box (eMSG) version for ID3 timed metadata in your output.\nFor more information, see ISO/IEC 23009-1:2022 section 5.10.3.3.3 Syntax.\nLeave blank to use the default value Version 0.\nWhen you specify Version 1, you must also set ID3 metadata to Passthrough.", "enum": [ "VERSION_0", "VERSION_1" @@ -9504,7 +9504,7 @@ }, "Mpeg2AdaptiveQuantization": { "type": "string", - "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to the following settings: Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).", + "documentation": "Specify the strength of any adaptive quantization filters that you enable. 
The value that you choose here applies to the following settings: Spatial adaptive quantization, and Temporal adaptive quantization.", "enum": [ "OFF", "LOW", @@ -9514,7 +9514,7 @@ }, "Mpeg2CodecLevel": { "type": "string", - "documentation": "Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output.", + "documentation": "Use Level to set the MPEG-2 level for the video output.", "enum": [ "AUTO", "LOW", @@ -9525,7 +9525,7 @@ }, "Mpeg2CodecProfile": { "type": "string", - "documentation": "Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output.", + "documentation": "Use Profile to set the MPEG-2 profile for the video output.", "enum": [ "MAIN", "PROFILE_422" @@ -9533,7 +9533,7 @@ }, "Mpeg2DynamicSubGop": { "type": "string", - "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames).", + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", "enum": [ "ADAPTIVE", "STATIC" @@ -9541,7 +9541,7 @@ }, "Mpeg2FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. 
If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -9558,7 +9558,7 @@ }, "Mpeg2GopSizeUnits": { "type": "string", - "documentation": "Specify the units for GOP size (GopSize). If you don't specify a value here, by default the encoder measures GOP size in frames.", + "documentation": "Specify the units for GOP size. If you don't specify a value here, by default the encoder measures GOP size in frames.", "enum": [ "FRAMES", "SECONDS" @@ -9566,7 +9566,7 @@ }, "Mpeg2InterlaceMode": { "type": "string", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. 
Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -9577,7 +9577,7 @@ }, "Mpeg2IntraDcPrecision": { "type": "string", - "documentation": "Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision for intra-block DC coefficients. If you choose the value auto, the service will automatically select the precision based on the per-frame compression ratio.", + "documentation": "Use Intra DC precision to set quantization precision for intra-block DC coefficients. 
If you choose the value auto, the service will automatically select the precision based on the per-frame compression ratio.", "enum": [ "AUTO", "INTRA_DC_PRECISION_8", @@ -9588,7 +9588,7 @@ }, "Mpeg2ParControl": { "type": "string", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -9596,7 +9596,7 @@ }, "Mpeg2QualityTuningLevel": { "type": "string", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. 
The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "MULTI_PASS" @@ -9604,7 +9604,7 @@ }, "Mpeg2RateControlMode": { "type": "string", - "documentation": "Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate is variable (vbr) or constant (cbr).", + "documentation": "Use Rate control mode to specify whether the bitrate is variable (vbr) or constant (cbr).", "enum": [ "VBR", "CBR" @@ -9612,7 +9612,7 @@ }, "Mpeg2ScanTypeConversionMode": { "type": "string", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. 
With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "enum": [ "INTERLACED", "INTERLACED_OPTIMIZE" @@ -9632,7 +9632,7 @@ "AdaptiveQuantization": { "shape": "Mpeg2AdaptiveQuantization", "locationName": "adaptiveQuantization", - "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to the following settings: Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization)." + "documentation": "Specify the strength of any adaptive quantization filters that you enable. The value that you choose here applies to the following settings: Spatial adaptive quantization, and Temporal adaptive quantization." }, "Bitrate": { "shape": "__integerMin1000Max288000000", @@ -9642,22 +9642,22 @@ "CodecLevel": { "shape": "Mpeg2CodecLevel", "locationName": "codecLevel", - "documentation": "Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output." + "documentation": "Use Level to set the MPEG-2 level for the video output." }, "CodecProfile": { "shape": "Mpeg2CodecProfile", "locationName": "codecProfile", - "documentation": "Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output." + "documentation": "Use Profile to set the MPEG-2 profile for the video output." }, "DynamicSubGop": { "shape": "Mpeg2DynamicSubGop", "locationName": "dynamicSubGop", - "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. 
This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames)." + "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames." }, "FramerateControl": { "shape": "Mpeg2FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. 
The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "Mpeg2FramerateConversionAlgorithm", @@ -9682,12 +9682,12 @@ "GopSize": { "shape": "__doubleMin0", "locationName": "gopSize", - "documentation": "Specify the interval between keyframes, in seconds or frames, for this output. Default: 12 Related settings: When you specify the GOP size in seconds, set GOP mode control (GopSizeUnits) to Specified, seconds (SECONDS). The default value for GOP mode control (GopSizeUnits) is Frames (FRAMES)." + "documentation": "Specify the interval between keyframes, in seconds or frames, for this output. Default: 12 Related settings: When you specify the GOP size in seconds, set GOP mode control to Specified, seconds. The default value for GOP mode control is Frames." }, "GopSizeUnits": { "shape": "Mpeg2GopSizeUnits", "locationName": "gopSizeUnits", - "documentation": "Specify the units for GOP size (GopSize). If you don't specify a value here, by default the encoder measures GOP size in frames." + "documentation": "Specify the units for GOP size. If you don't specify a value here, by default the encoder measures GOP size in frames." }, "HrdBufferFinalFillPercentage": { "shape": "__integerMin0Max100", @@ -9707,12 +9707,12 @@ "InterlaceMode": { "shape": "Mpeg2InterlaceMode", "locationName": "interlaceMode", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. 
For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." }, "IntraDcPrecision": { "shape": "Mpeg2IntraDcPrecision", "locationName": "intraDcPrecision", - "documentation": "Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision for intra-block DC coefficients. If you choose the value auto, the service will automatically select the precision based on the per-frame compression ratio." + "documentation": "Use Intra DC precision to set quantization precision for intra-block DC coefficients. If you choose the value auto, the service will automatically select the precision based on the per-frame compression ratio." 
}, "MaxBitrate": { "shape": "__integerMin1000Max300000000", @@ -9722,7 +9722,7 @@ "MinIInterval": { "shape": "__integerMin0Max30", "locationName": "minIInterval", - "documentation": "Use this setting only when you also enable Scene change detection (SceneChangeDetect). This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. When you specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval (minIInterval) to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." + "documentation": "Use this setting only when you also enable Scene change detection. This setting determines how the encoder manages the spacing between I-frames that it inserts as part of the I-frame cadence and the I-frames that it inserts for Scene change detection. When you specify a value for this setting, the encoder determines whether to skip a cadence-driven I-frame by the value you set. For example, if you set Min I interval to 5 and a cadence-driven I-frame would fall within 5 frames of a scene-change I-frame, then the encoder skips the cadence-driven I-frame. In this way, one GOP is shrunk slightly and one GOP is stretched slightly. When the cadence-driven I-frames are farther from the scene-change I-frame than the value you set, then the encoder leaves all I-frames in place and the GOPs surrounding the scene change are smaller than the usual cadence GOPs." 
}, "NumberBFramesBetweenReferenceFrames": { "shape": "__integerMin0Max7", @@ -9732,32 +9732,32 @@ "ParControl": { "shape": "Mpeg2ParControl", "locationName": "parControl", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. 
In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "Mpeg2QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." }, "RateControlMode": { "shape": "Mpeg2RateControlMode", "locationName": "rateControlMode", - "documentation": "Use Rate control mode (Mpeg2RateControlMode) to specify whether the bitrate is variable (vbr) or constant (cbr)." + "documentation": "Use Rate control mode to specify whether the bitrate is variable (vbr) or constant (cbr)." 
}, "ScanTypeConversionMode": { "shape": "Mpeg2ScanTypeConversionMode", "locationName": "scanTypeConversionMode", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. 
You must also set Interlace mode to a value other than Progressive." }, "SceneChangeDetect": { "shape": "Mpeg2SceneChangeDetect", @@ -9767,39 +9767,39 @@ "SlowPal": { "shape": "Mpeg2SlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." }, "Softness": { "shape": "__integerMin0Max128", "locationName": "softness", - "documentation": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). The Softness (softness) setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, to use the AWS Elemental default matrices. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video." 
+ "documentation": "Ignore this setting unless you need to comply with a specification that requires a specific value. If you don't have a specification requirement, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness or by enabling a noise reducer filter. The Softness setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, to use the AWS Elemental default matrices. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video." }, "SpatialAdaptiveQuantization": { "shape": "Mpeg2SpatialAdaptiveQuantization", "locationName": "spatialAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." 
+ "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." }, "Syntax": { "shape": "Mpeg2Syntax", "locationName": "syntax", - "documentation": "Specify whether this output's video uses the D10 syntax. Keep the default value to not use the syntax. Related settings: When you choose D10 (D_10) for your MXF profile (profile), you must also set this value to D10 (D_10)." + "documentation": "Specify whether this output's video uses the D10 syntax. Keep the default value to not use the syntax. Related settings: When you choose D10 for your MXF profile, you must also set this value to D10." }, "Telecine": { "shape": "Mpeg2Telecine", "locationName": "telecine", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. 
Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." }, "TemporalAdaptiveQuantization": { "shape": "Mpeg2TemporalAdaptiveQuantization", "locationName": "temporalAdaptiveQuantization", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization)." 
+ "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization." } }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value MPEG2." + "documentation": "Required when you set Codec to the value MPEG2." }, "Mpeg2SlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. 
When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -9807,7 +9807,7 @@ }, "Mpeg2SpatialAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. 
Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", "enum": [ "DISABLED", "ENABLED" @@ -9815,7 +9815,7 @@ }, "Mpeg2Syntax": { "type": "string", - "documentation": "Specify whether this output's video uses the D10 syntax. Keep the default value to not use the syntax. Related settings: When you choose D10 (D_10) for your MXF profile (profile), you must also set this value to D10 (D_10).", + "documentation": "Specify whether this output's video uses the D10 syntax. Keep the default value to not use the syntax. Related settings: When you choose D10 for your MXF profile, you must also set this value to D10.", "enum": [ "DEFAULT", "D_10" @@ -9823,7 +9823,7 @@ }, "Mpeg2Telecine": { "type": "string", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine (HARD) produces a 29.97i output. Soft telecine (SOFT) produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. 
When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard or soft telecine to create a smoother picture. Hard telecine produces a 29.97i output. Soft telecine produces an output with a 23.976 output that signals to the video player device to do the conversion during play back. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "enum": [ "NONE", "SOFT", @@ -9832,7 +9832,7 @@ }, "Mpeg2TemporalAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization).", + "documentation": "Keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. 
When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal quantization, adjust the strength of the filter with the setting Adaptive quantization.", "enum": [ "DISABLED", "ENABLED" @@ -9871,11 +9871,11 @@ "documentation": "If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead." } }, - "documentation": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider." + "documentation": "If you are using DRM, set DRM System to specify the value SpekeKeyProvider." }, "MsSmoothFragmentLengthControl": { "type": "string", - "documentation": "Specify how you want MediaConvert to determine the fragment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Fragment length (FragmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "documentation": "Specify how you want MediaConvert to determine the fragment length. Choose Exact to have the encoder use the exact length that you specify with the setting Fragment length. This might result in extra I-frames. 
Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.", "enum": [ "EXACT", "GOP_MULTIPLE" @@ -9897,7 +9897,7 @@ "Destination": { "shape": "__stringPatternS3", "locationName": "destination", - "documentation": "Use Destination (Destination) to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." + "documentation": "Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file." }, "DestinationSettings": { "shape": "DestinationSettings", @@ -9907,29 +9907,29 @@ "Encryption": { "shape": "MsSmoothEncryptionSettings", "locationName": "encryption", - "documentation": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider." + "documentation": "If you are using DRM, set DRM System to specify the value SpekeKeyProvider." }, "FragmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "fragmentLength", - "documentation": "Specify how you want MediaConvert to determine the fragment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Fragment length (FragmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "documentation": "Specify how you want MediaConvert to determine the fragment length. Choose Exact to have the encoder use the exact length that you specify with the setting Fragment length. 
This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." }, "FragmentLengthControl": { "shape": "MsSmoothFragmentLengthControl", "locationName": "fragmentLengthControl", - "documentation": "Specify how you want MediaConvert to determine the fragment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Fragment length (FragmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + "documentation": "Specify how you want MediaConvert to determine the fragment length. Choose Exact to have the encoder use the exact length that you specify with the setting Fragment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary." }, "ManifestEncoding": { "shape": "MsSmoothManifestEncoding", "locationName": "manifestEncoding", - "documentation": "Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding format for the server and client manifest. Valid options are utf8 and utf16." + "documentation": "Use Manifest encoding to specify the encoding format for the server and client manifest. Valid options are utf8 and utf16." } }, - "documentation": "Settings related to your Microsoft Smooth Streaming output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to MS_SMOOTH_GROUP_SETTINGS." + "documentation": "Settings related to your Microsoft Smooth Streaming output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." 
}, "MsSmoothManifestEncoding": { "type": "string", - "documentation": "Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding format for the server and client manifest. Valid options are utf8 and utf16.", + "documentation": "Use Manifest encoding to specify the encoding format for the server and client manifest. Valid options are utf8 and utf16.", "enum": [ "UTF8", "UTF16" @@ -9937,7 +9937,7 @@ }, "MxfAfdSignaling": { "type": "string", - "documentation": "Optional. When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings.", + "documentation": "Optional. When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings.", "enum": [ "NO_COPY", "COPY_FROM_VIDEO" @@ -9960,7 +9960,7 @@ "AfdSignaling": { "shape": "MxfAfdSignaling", "locationName": "afdSignaling", - "documentation": "Optional. 
When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings." + "documentation": "Optional. When you have AFD signaling set up in your output video stream, use this setting to choose whether to also include it in the MXF wrapper. Choose Don't copy to exclude AFD signaling from the MXF wrapper. Choose Copy from video stream to copy the AFD values from the video stream for this output to the MXF wrapper. Regardless of which option you choose, the AFD values remain in the video stream. Related settings: To set up your output to include or exclude AFD values, see AfdSignaling, under VideoDescription. On the console, find AFD signaling under the output's video encoding settings." }, "Profile": { "shape": "MxfProfile", @@ -9977,7 +9977,7 @@ }, "MxfXavcDurationMode": { "type": "string", - "documentation": "To create an output that complies with the XAVC file format guidelines for interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE). To include all frames from your input in this output, keep the default setting, Allow any duration (ALLOW_ANY_DURATION). The number of frames that MediaConvert excludes when you set this to Drop frames for compliance depends on the output frame rate and duration.", + "documentation": "To create an output that complies with the XAVC file format guidelines for interoperability, keep the default value, Drop frames for compliance. 
To include all frames from your input in this output, keep the default setting, Allow any duration. The number of frames that MediaConvert excludes when you set this to Drop frames for compliance depends on the output frame rate and duration.", "enum": [ "ALLOW_ANY_DURATION", "DROP_FRAMES_FOR_COMPLIANCE" @@ -9989,12 +9989,12 @@ "DurationMode": { "shape": "MxfXavcDurationMode", "locationName": "durationMode", - "documentation": "To create an output that complies with the XAVC file format guidelines for interoperability, keep the default value, Drop frames for compliance (DROP_FRAMES_FOR_COMPLIANCE). To include all frames from your input in this output, keep the default setting, Allow any duration (ALLOW_ANY_DURATION). The number of frames that MediaConvert excludes when you set this to Drop frames for compliance depends on the output frame rate and duration." + "documentation": "To create an output that complies with the XAVC file format guidelines for interoperability, keep the default value, Drop frames for compliance. To include all frames from your input in this output, keep the default setting, Allow any duration. The number of frames that MediaConvert excludes when you set this to Drop frames for compliance depends on the output frame rate and duration." }, "MaxAncDataSize": { "shape": "__integerMin0Max2147483647", "locationName": "maxAncDataSize", - "documentation": "Specify a value for this setting only for outputs that you set up with one of these two XAVC profiles: XAVC HD Intra CBG (XAVC_HD_INTRA_CBG) or XAVC 4K Intra CBG (XAVC_4K_INTRA_CBG). Specify the amount of space in each frame that the service reserves for ancillary data, such as teletext captions. The default value for this setting is 1492 bytes per frame. This should be sufficient to prevent overflow unless you have multiple pages of teletext captions data. If you have a large amount of teletext data, specify a larger number." 
+ "documentation": "Specify a value for this setting only for outputs that you set up with one of these two XAVC profiles: XAVC HD Intra CBG or XAVC 4K Intra CBG. Specify the amount of space in each frame that the service reserves for ancillary data, such as teletext captions. The default value for this setting is 1492 bytes per frame. This should be sufficient to prevent overflow unless you have multiple pages of teletext captions data. If you have a large amount of teletext data, specify a larger number." } }, "documentation": "Specify the XAVC profile settings for MXF outputs when you set your MXF profile to XAVC." @@ -10005,17 +10005,17 @@ "License": { "shape": "__stringMin1Max100000", "locationName": "license", - "documentation": "Use the base64 license string that Nagra provides you. Enter it directly in your JSON job specification or in the console. Required when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job." + "documentation": "Use the base64 license string that Nagra provides you. Enter it directly in your JSON job specification or in the console. Required when you include Nagra NexGuard File Marker watermarking in your job." }, "Payload": { "shape": "__integerMin0Max4194303", "locationName": "payload", - "documentation": "Specify the payload ID that you want associated with this output. Valid values vary depending on your Nagra NexGuard forensic watermarking workflow. Required when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job. For PreRelease Content (NGPR/G2), specify an integer from 1 through 4,194,303. You must generate a unique ID for each asset you watermark, and keep a record of which ID you have assigned to each asset. Neither Nagra nor MediaConvert keep track of the relationship between output files and your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for each asset. Do this by setting up two output groups. 
For one output group, set the value of Payload ID (payload) to 0 in every output. For the other output group, set Payload ID (payload) to 1 in every output." + "documentation": "Specify the payload ID that you want associated with this output. Valid values vary depending on your Nagra NexGuard forensic watermarking workflow. Required when you include Nagra NexGuard File Marker watermarking in your job. For PreRelease Content (NGPR/G2), specify an integer from 1 through 4,194,303. You must generate a unique ID for each asset you watermark, and keep a record of which ID you have assigned to each asset. Neither Nagra nor MediaConvert keep track of the relationship between output files and your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for each asset. Do this by setting up two output groups. For one output group, set the value of Payload ID to 0 in every output. For the other output group, set Payload ID to 1 in every output." }, "Preset": { "shape": "__stringMin1Max256", "locationName": "preset", - "documentation": "Enter one of the watermarking preset strings that Nagra provides you. Required when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in your job." + "documentation": "Enter one of the watermarking preset strings that Nagra provides you. Required when you include Nagra NexGuard File Marker watermarking in your job." }, "Strength": { "shape": "WatermarkingStrength", @@ -10027,7 +10027,7 @@ }, "NielsenActiveWatermarkProcessType": { "type": "string", - "documentation": "Choose the type of Nielsen watermarks that you want in your outputs. When you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the setting SID (sourceId). When you choose CBET (CBET), you must provide a value for the setting CSID (cbetSourceId). 
When you choose NAES 2, NW, and CBET (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings.", + "documentation": "Choose the type of Nielsen watermarks that you want in your outputs. When you choose NAES 2 and NW, you must provide a value for the setting SID. When you choose CBET, you must provide a value for the setting CSID. When you choose NAES 2, NW, and CBET, you must provide values for both of these settings.", "enum": [ "NAES2_AND_NW", "CBET", @@ -10045,10 +10045,10 @@ "DistributorId": { "shape": "__string", "locationName": "distributorId", - "documentation": "Use Distributor ID (DistributorID) to specify the distributor ID that is assigned to your organization by Neilsen." + "documentation": "Use Distributor ID to specify the distributor ID that is assigned to your organization by Nielsen." } }, - "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs in the job. To enable Nielsen configuration programmatically, include an instance of nielsenConfiguration in your JSON job specification. Even if you don't include any children of nielsenConfiguration, you still enable the setting." + "documentation": "Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration, MediaConvert enables PCM to ID3 tagging for all outputs in the job." }, "NielsenNonLinearWatermarkSettings": { "type": "structure", @@ -10056,12 +10056,12 @@ "ActiveWatermarkProcess": { "shape": "NielsenActiveWatermarkProcessType", "locationName": "activeWatermarkProcess", - "documentation": "Choose the type of Nielsen watermarks that you want in your outputs. When you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the setting SID (sourceId). 
When you choose CBET (CBET), you must provide a value for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings." + "documentation": "Choose the type of Nielsen watermarks that you want in your outputs. When you choose NAES 2 and NW, you must provide a value for the setting SID. When you choose CBET, you must provide a value for the setting CSID. When you choose NAES 2, NW, and CBET, you must provide values for both of these settings." }, "AdiFilename": { "shape": "__stringPatternS3", "locationName": "adiFilename", - "documentation": "Optional. Use this setting when you want the service to include an ADI file in the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon S3 and provide a URL to it here. The URL should be in the following format: S3://bucket/path/ADI-file. For more information about the metadata .zip file, see the setting Metadata destination (metadataDestination)." + "documentation": "Optional. Use this setting when you want the service to include an ADI file in the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon S3 and provide a URL to it here. The URL should be in the following format: S3://bucket/path/ADI-file. For more information about the metadata .zip file, see the setting Metadata destination." }, "AssetId": { "shape": "__stringMin1Max20", @@ -10076,7 +10076,7 @@ "CbetSourceId": { "shape": "__stringPattern0xAFaF0908190908", "locationName": "cbetSourceId", - "documentation": "Use the CSID that Nielsen provides to you. This CBET source ID should be unique to your Nielsen account but common to all of your output assets that have CBET watermarking. Required when you choose a value for the setting Watermark types (ActiveWatermarkProcess) that includes CBET." + "documentation": "Use the CSID that Nielsen provides to you. 
This CBET source ID should be unique to your Nielsen account but common to all of your output assets that have CBET watermarking. Required when you choose a value for the setting Watermark types that includes CBET." }, "EpisodeId": { "shape": "__stringMin1Max20", @@ -10086,7 +10086,7 @@ "MetadataDestination": { "shape": "__stringPatternS3", "locationName": "metadataDestination", - "documentation": "Specify the Amazon S3 location where you want MediaConvert to save your Nielsen non-linear metadata .zip file. This Amazon S3 bucket must be in the same Region as the one where you do your MediaConvert transcoding. If you want to include an ADI file in this .zip file, use the setting ADI file (adiFilename) to specify it. MediaConvert delivers the Nielsen metadata .zip files only to your metadata destination Amazon S3 bucket. It doesn't deliver the .zip files to Nielsen. You are responsible for delivering the metadata .zip files to Nielsen." + "documentation": "Specify the Amazon S3 location where you want MediaConvert to save your Nielsen non-linear metadata .zip file. This Amazon S3 bucket must be in the same Region as the one where you do your MediaConvert transcoding. If you want to include an ADI file in this .zip file, use the setting ADI file to specify it. MediaConvert delivers the Nielsen metadata .zip files only to your metadata destination Amazon S3 bucket. It doesn't deliver the .zip files to Nielsen. You are responsible for delivering the metadata .zip files to Nielsen." }, "SourceId": { "shape": "__integerMin0Max65534", @@ -10096,7 +10096,7 @@ "SourceWatermarkStatus": { "shape": "NielsenSourceWatermarkStatusType", "locationName": "sourceWatermarkStatus", - "documentation": "Required. Specify whether your source content already contains Nielsen non-linear watermarks. When you set this value to Watermarked (WATERMARKED), the service fails the job. 
Nielsen requires that you add non-linear watermarking to only clean content that doesn't already have non-linear Nielsen watermarks." + "documentation": "Required. Specify whether your source content already contains Nielsen non-linear watermarks. When you set this value to Watermarked, the service fails the job. Nielsen requires that you add non-linear watermarking to only clean content that doesn't already have non-linear Nielsen watermarks." }, "TicServerUrl": { "shape": "__stringPatternHttps", @@ -10106,14 +10106,14 @@ "UniqueTicPerAudioTrack": { "shape": "NielsenUniqueTicPerAudioTrackType", "locationName": "uniqueTicPerAudioTrack", - "documentation": "To create assets that have the same TIC values in each audio track, keep the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK)." + "documentation": "To create assets that have the same TIC values in each audio track, keep the default value Share TICs. To create assets that have unique TIC values for each audio track, choose Use unique TICs." } }, "documentation": "Ignore these settings unless you are using Nielsen non-linear watermarking. Specify the values that MediaConvert uses to generate and place Nielsen watermarks in your output audio. In addition to specifying these values, you also need to set up your cloud TIC server. These settings apply to every output in your job. The MediaConvert implementation is currently with the following Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]" }, "NielsenSourceWatermarkStatusType": { "type": "string", - "documentation": "Required. Specify whether your source content already contains Nielsen non-linear watermarks. When you set this value to Watermarked (WATERMARKED), the service fails the job. 
Nielsen requires that you add non-linear watermarking to only clean content that doesn't already have non-linear Nielsen watermarks.", + "documentation": "Required. Specify whether your source content already contains Nielsen non-linear watermarks. When you set this value to Watermarked, the service fails the job. Nielsen requires that you add non-linear watermarking to only clean content that doesn't already have non-linear Nielsen watermarks.", "enum": [ "CLEAN", "WATERMARKED" @@ -10121,7 +10121,7 @@ }, "NielsenUniqueTicPerAudioTrackType": { "type": "string", - "documentation": "To create assets that have the same TIC values in each audio track, keep the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK).", + "documentation": "To create assets that have the same TIC values in each audio track, keep the default value Share TICs. To create assets that have unique TIC values for each audio track, choose Use unique TICs.", "enum": [ "RESERVE_UNIQUE_TICS_PER_TRACK", "SAME_TICS_PER_TRACK" @@ -10129,7 +10129,7 @@ }, "NoiseFilterPostTemporalSharpening": { "type": "string", - "documentation": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth and sharpness of your output is reduced. You can optionally use Post temporal sharpening (postTemporalSharpening) to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (postTemporalSharpeningStrength). 
Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening.", + "documentation": "When you set Noise reducer to Temporal, the bandwidth and sharpness of your output is reduced. You can optionally use Post temporal sharpening to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto, allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled, specify how much sharpening is applied using Post temporal sharpening strength. Set Post temporal sharpening to Disabled to not apply sharpening.", "enum": [ "DISABLED", "ENABLED", @@ -10138,7 +10138,7 @@ }, "NoiseFilterPostTemporalSharpeningStrength": { "type": "string", - "documentation": "Use Post temporal sharpening strength (postTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High (HIGH) to indicate the amount of sharpening.", + "documentation": "Use Post temporal sharpening strength to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low, Medium, or High to indicate the amount of sharpening.", "enum": [ "LOW", "MEDIUM", @@ -10151,7 +10151,7 @@ "Filter": { "shape": "NoiseReducerFilter", "locationName": "filter", - "documentation": "Use Noise reducer filter (NoiseReducerFilter) to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain filtering based on JND principles. 
* Temporal optimizes video quality for complex motion." + "documentation": "Use Noise reducer filter to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer. * Bilateral preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain filtering based on JND principles. * Temporal optimizes video quality for complex motion." }, "FilterSettings": { "shape": "NoiseReducerFilterSettings", @@ -10173,7 +10173,7 @@ }, "NoiseReducerFilter": { "type": "string", - "documentation": "Use Noise reducer filter (NoiseReducerFilter) to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain filtering based on JND principles. * Temporal optimizes video quality for complex motion.", + "documentation": "Use Noise reducer filter to select one of the following spatial image filtering functions. To use this setting, you must also enable Noise reducer. * Bilateral preserves edges while reducing noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain filtering based on JND principles. * Temporal optimizes video quality for complex motion.", "enum": [ "BILATERAL", "MEAN", @@ -10228,12 +10228,12 @@ "PostTemporalSharpening": { "shape": "NoiseFilterPostTemporalSharpening", "locationName": "postTemporalSharpening", - "documentation": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the bandwidth and sharpness of your output is reduced. 
You can optionally use Post temporal sharpening (postTemporalSharpening) to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (postTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening." + "documentation": "When you set Noise reducer to Temporal, the bandwidth and sharpness of your output is reduced. You can optionally use Post temporal sharpening to apply sharpening to the edges of your output. Note that Post temporal sharpening will also make the bandwidth reduction from the Noise reducer smaller. The default behavior, Auto, allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled, specify how much sharpening is applied using Post temporal sharpening strength. Set Post temporal sharpening to Disabled to not apply sharpening." }, "PostTemporalSharpeningStrength": { "shape": "NoiseFilterPostTemporalSharpeningStrength", "locationName": "postTemporalSharpeningStrength", - "documentation": "Use Post temporal sharpening strength (postTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), Medium (MEDIUM), or High (HIGH) to indicate the amount of sharpening." + "documentation": "Use Post temporal sharpening strength to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low, Medium, or High to indicate the amount of sharpening." 
}, "Speed": { "shape": "__integerMinNegative1Max3", @@ -10273,7 +10273,7 @@ "Channels": { "shape": "__integerMin1Max2", "locationName": "channels", - "documentation": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2." + "documentation": "Specify the number of channels in this output audio track. Choosing Mono on gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2." }, "SampleRate": { "shape": "__integerMin16000Max48000", @@ -10297,12 +10297,12 @@ "AudioDescriptions": { "shape": "__listOfAudioDescription", "locationName": "audioDescriptions", - "documentation": "(AudioDescriptions) contains groups of audio encoding settings organized by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) can contain multiple groups of encoding settings." + "documentation": "Contains groups of audio encoding settings organized by audio codec. Include one instance of per output. Can contain multiple groups of encoding settings." }, "CaptionDescriptions": { "shape": "__listOfCaptionDescription", "locationName": "captionDescriptions", - "documentation": "(CaptionDescriptions) contains groups of captions settings. For each output that has captions, include one instance of (CaptionDescriptions). (CaptionDescriptions) can contain multiple groups of captions settings." + "documentation": "Contains groups of captions settings. For each output that has captions, include one instance of CaptionDescriptions. Can contain multiple groups of captions settings." }, "ContainerSettings": { "shape": "ContainerSettings", @@ -10312,12 +10312,12 @@ "Extension": { "shape": "__string", "locationName": "extension", - "documentation": "Use Extension (Extension) to specify the file extension for outputs in File output groups. 
If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)" + "documentation": "Use Extension to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)" }, "NameModifier": { "shape": "__stringMin1", "locationName": "nameModifier", - "documentation": "Use Name modifier (NameModifier) to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier (NameModifier) is required. Name modifier also accepts format identifiers. For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group." + "documentation": "Use Name modifier to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier is required. Name modifier also accepts format identifiers. For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group." }, "OutputSettings": { "shape": "OutputSettings", @@ -10327,7 +10327,7 @@ "Preset": { "shape": "__stringMin0", "locationName": "preset", - "documentation": "Use Preset (Preset) to specify a preset for your transcoding settings. 
Provide the system or custom preset name. You can specify either Preset (Preset) or Container settings (ContainerSettings), but not both." + "documentation": "Use Preset to specify a preset for your transcoding settings. Provide the system or custom preset name. You can specify either Preset or Container settings, but not both." }, "VideoDescription": { "shape": "VideoDescription", @@ -10380,7 +10380,7 @@ "CustomName": { "shape": "__string", "locationName": "customName", - "documentation": "Use Custom Group Name (CustomName) to specify a name for the output group. This value is displayed on the console and can make your job settings JSON more human-readable. It does not affect your outputs. Use up to twelve characters that are either letters, numbers, spaces, or underscores." + "documentation": "Use Custom Group Name to specify a name for the output group. This value is displayed on the console and can make your job settings JSON more human-readable. It does not affect your outputs. Use up to twelve characters that are either letters, numbers, spaces, or underscores." }, "Name": { "shape": "__string", @@ -10417,27 +10417,27 @@ "CmafGroupSettings": { "shape": "CmafGroupSettings", "locationName": "cmafGroupSettings", - "documentation": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to CMAF_GROUP_SETTINGS." + "documentation": "Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "DashIsoGroupSettings": { "shape": "DashIsoGroupSettings", "locationName": "dashIsoGroupSettings", - "documentation": "Settings related to your DASH output package. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to DASH_ISO_GROUP_SETTINGS." + "documentation": "Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "FileGroupSettings": { "shape": "FileGroupSettings", "locationName": "fileGroupSettings", - "documentation": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to FILE_GROUP_SETTINGS." + "documentation": "Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package." }, "HlsGroupSettings": { "shape": "HlsGroupSettings", "locationName": "hlsGroupSettings", - "documentation": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to HLS_GROUP_SETTINGS." + "documentation": "Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "MsSmoothGroupSettings": { "shape": "MsSmoothGroupSettings", "locationName": "msSmoothGroupSettings", - "documentation": "Settings related to your Microsoft Smooth Streaming output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html. 
When you work directly in your JSON job specification, include this object and any required children when you set Type, under OutputGroupSettings, to MS_SMOOTH_GROUP_SETTINGS." + "documentation": "Settings related to your Microsoft Smooth Streaming output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html." }, "Type": { "shape": "OutputGroupType", @@ -10481,7 +10481,7 @@ }, "PadVideo": { "type": "string", - "documentation": "Use this setting if your input has video and audio durations that don't align, and your output or player has strict alignment requirements. Examples: Input audio track has a delayed start. Input video track ends before audio ends. When you set Pad video (padVideo) to Black (BLACK), MediaConvert generates black video frames so that output video and audio durations match. Black video frames are added at the beginning or end, depending on your input. To keep the default behavior and not generate black video, set Pad video to Disabled (DISABLED) or leave blank.", + "documentation": "Use this setting if your input has video and audio durations that don't align, and your output or player has strict alignment requirements. Examples: Input audio track has a delayed start. Input video track ends before audio ends. When you set Pad video to Black, MediaConvert generates black video frames so that output video and audio durations match. Black video frames are added at the beginning or end, depending on your input. To keep the default behavior and not generate black video, set Pad video to Disabled or leave blank.", "enum": [ "DISABLED", "BLACK" @@ -10584,7 +10584,7 @@ "AudioDescriptions": { "shape": "__listOfAudioDescription", "locationName": "audioDescriptions", - "documentation": "(AudioDescriptions) contains groups of audio encoding settings organized by audio codec. Include one instance of (AudioDescriptions) per output. (AudioDescriptions) can contain multiple groups of encoding settings." 
+ "documentation": "Contains groups of audio encoding settings organized by audio codec. Include one instance of AudioDescriptions per output. Can contain multiple groups of encoding settings." }, "CaptionDescriptions": { "shape": "__listOfCaptionDescriptionPreset", @@ -10614,7 +10614,7 @@ }, "ProresChromaSampling": { "type": "string", - "documentation": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose an output codec profile that supports 4:4:4 chroma sampling. These values for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all video preprocessors except for Nexguard file marker (PartnerWatermarking). When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) to Drop duplicate (DUPLICATE_DROP).", + "documentation": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. 
Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer.", "enum": [ "PRESERVE_444_SAMPLING", "SUBSAMPLE_TO_422" @@ -10622,7 +10622,7 @@ }, "ProresCodecProfile": { "type": "string", - "documentation": "Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec to use for this output.", + "documentation": "Use Profile to specify the type of Apple ProRes codec to use for this output.", "enum": [ "APPLE_PRORES_422", "APPLE_PRORES_422_HQ", @@ -10634,7 +10634,7 @@ }, "ProresFramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. 
If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -10651,7 +10651,7 @@ }, "ProresInterlaceMode": { "type": "string", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. 
If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -10662,7 +10662,7 @@ }, "ProresParControl": { "type": "string", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -10670,7 +10670,7 @@ }, "ProresScanTypeConversionMode": { "type": "string", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. 
When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "enum": [ "INTERLACED", "INTERLACED_OPTIMIZE" @@ -10682,17 +10682,17 @@ "ChromaSampling": { "shape": "ProresChromaSampling", "locationName": "chromaSampling", - "documentation": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. 
Related Settings: When you set Chroma sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose an output codec profile that supports 4:4:4 chroma sampling. These values for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all video preprocessors except for Nexguard file marker (PartnerWatermarking). When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) to Drop duplicate (DUPLICATE_DROP)." + "documentation": "This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer." }, "CodecProfile": { "shape": "ProresCodecProfile", "locationName": "codecProfile", - "documentation": "Use Profile (ProResCodecProfile) to specify the type of Apple ProRes codec to use for this output." + "documentation": "Use Profile to specify the type of Apple ProRes codec to use for this output." }, "FramerateControl": { "shape": "ProresFramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. 
If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "ProresFramerateConversionAlgorithm", @@ -10712,44 +10712,44 @@ "InterlaceMode": { "shape": "ProresInterlaceMode", "locationName": "interlaceMode", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. 
If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." }, "ParControl": { "shape": "ProresParControl", "locationName": "parControl", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR, choose any value other than Follow source. 
When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." 
}, "ScanTypeConversionMode": { "shape": "ProresScanTypeConversionMode", "locationName": "scanTypeConversionMode", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. 
You must also set Interlace mode to a value other than Progressive." }, "SlowPal": { "shape": "ProresSlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25." }, "Telecine": { "shape": "ProresTelecine", "locationName": "telecine", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." 
} }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value PRORES." + "documentation": "Required when you set Codec to the value PRORES." }, "ProresSlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output. When you enable slow PAL, MediaConvert relabels the video frames to 25 fps and resamples your audio to keep it synchronized with the video. Note that enabling this setting will slightly reduce the duration of your video. Required settings: You must also set Framerate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -10757,7 +10757,7 @@ }, "ProresTelecine": { "type": "string", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. 
When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "enum": [ "NONE", "HARD" @@ -10919,7 +10919,7 @@ "ChannelMapping": { "shape": "ChannelMapping", "locationName": "channelMapping", - "documentation": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both." + "documentation": "Channel mapping contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both." 
}, "ChannelsIn": { "shape": "__integerMin1Max64", @@ -10932,7 +10932,7 @@ "documentation": "Specify the number of channels in this output after remixing. Valid values: 1, 2, 4, 6, 8... 64. (1 and even numbers to 64.) If you are doing both input channel mapping and output channel mapping, the number of output channels in your input mapping must be the same as the number of input channels in your output mapping." } }, - "documentation": "Use Manual audio remixing (RemixSettings) to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides." + "documentation": "Use Manual audio remixing to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides." }, "RenewalType": { "type": "string", @@ -11038,7 +11038,7 @@ }, "RespondToAfd": { "type": "string", - "documentation": "Use Respond to AFD (RespondToAfd) to specify how the service changes the video itself in response to AFD values in the input. * Choose Respond to clip the input video frame according to the AFD value, input display aspect ratio, and output display aspect ratio. * Choose Passthrough to include the input AFD values. Do not choose this when AfdSignaling is set to (NONE). A preferred implementation of this workflow is to set RespondToAfd to (NONE) and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values from this output.", + "documentation": "Use Respond to AFD to specify how the service changes the video itself in response to AFD values in the input. * Choose Respond to clip the input video frame according to the AFD value, input display aspect ratio, and output display aspect ratio. * Choose Passthrough to include the input AFD values. Do not choose this when AfdSignaling is set to NONE. 
A preferred implementation of this workflow is to set RespondToAfd to None and set AfdSignaling to AUTO. * Choose None to remove all input AFD values from this output.", "enum": [ "NONE", "RESPOND", @@ -11088,17 +11088,17 @@ "EncryptionType": { "shape": "S3ServerSideEncryptionType", "locationName": "encryptionType", - "documentation": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN (kmsKeyArn)." + "documentation": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3. If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS. By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN." 
}, "KmsEncryptionContext": { "shape": "__stringPatternAZaZ0902", "locationName": "kmsEncryptionContext", - "documentation": "Optionally, specify the encryption context that you want to use alongside your KMS key. AWS KMS uses this encryption context as additional authenticated data (AAD) to support authenticated encryption. This value must be a base64-encoded UTF-8 string holding JSON which represents a string-string map. To use this setting, you must also set Server-side encryption (S3ServerSideEncryptionType) to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). For more information about encryption context, see: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context." + "documentation": "Optionally, specify the encryption context that you want to use alongside your KMS key. AWS KMS uses this encryption context as additional authenticated data (AAD) to support authenticated encryption. This value must be a base64-encoded UTF-8 string holding JSON which represents a string-string map. To use this setting, you must also set Server-side encryption to AWS KMS. For more information about encryption context, see: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context." }, "KmsKeyArn": { "shape": "__stringPatternArnAwsUsGovCnKmsAZ26EastWestCentralNorthSouthEastWest1912D12KeyAFAF098AFAF094AFAF094AFAF094AFAF0912MrkAFAF0932", "locationName": "kmsKeyArn", - "documentation": "Optionally, specify the customer master key (CMK) that you want to use to encrypt the data key that AWS uses to encrypt your output content. Enter the Amazon Resource Name (ARN) of the CMK. To use this setting, you must also set Server-side encryption (S3ServerSideEncryptionType) to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). If you set Server-side encryption to AWS KMS but don't specify a CMK here, AWS uses the AWS managed CMK associated with Amazon S3." 
+ "documentation": "Optionally, specify the customer master key (CMK) that you want to use to encrypt the data key that AWS uses to encrypt your output content. Enter the Amazon Resource Name (ARN) of the CMK. To use this setting, you must also set Server-side encryption to AWS KMS. If you set Server-side encryption to AWS KMS but don't specify a CMK here, AWS uses the AWS managed CMK associated with Amazon S3." } }, "documentation": "Settings for how your job outputs are encrypted as they are uploaded to Amazon S3." @@ -11115,7 +11115,7 @@ }, "S3ServerSideEncryptionType": { "type": "string", - "documentation": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN (kmsKeyArn).", + "documentation": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3. If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS. 
By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN.", "enum": [ "SERVER_SIDE_ENCRYPTION_S3", "SERVER_SIDE_ENCRYPTION_KMS" @@ -11132,7 +11132,7 @@ }, "ScalingBehavior": { "type": "string", - "documentation": "Specify how the service handles outputs that have a different aspect ratio from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) to have the service stretch your video image to fit. Keep the setting Default (DEFAULT) to have the service letterbox your video instead. This setting overrides any value that you specify for the setting Selection placement (position) in this output.", + "documentation": "Specify how the service handles outputs that have a different aspect ratio from the input aspect ratio. Choose Stretch to output to have the service stretch your video image to fit. Keep the setting Default to have the service letterbox your video instead. This setting overrides any value that you specify for the setting Selection placement in this output.", "enum": [ "DEFAULT", "STRETCH_TO_OUTPUT" @@ -11140,7 +11140,7 @@ }, "SccDestinationFramerate": { "type": "string", - "documentation": "Set Framerate (SccDestinationFramerate) to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. If the video frame rate is 29.97, choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe (FRAMERATE_29_97_NON_DROPFRAME).", + "documentation": "Set Framerate to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. 
If the video frame rate is 29.97, choose 29.97 dropframe only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe.", "enum": [ "FRAMERATE_23_97", "FRAMERATE_24", @@ -11155,10 +11155,10 @@ "Framerate": { "shape": "SccDestinationFramerate", "locationName": "framerate", - "documentation": "Set Framerate (SccDestinationFramerate) to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. If the video frame rate is 29.97, choose 29.97 dropframe (FRAMERATE_29_97_DROPFRAME) only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe (FRAMERATE_29_97_NON_DROPFRAME)." + "documentation": "Set Framerate to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. If the video frame rate is 29.97, choose 29.97 dropframe only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe." } }, - "documentation": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SCC." + "documentation": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html." 
}, "SimulateReservedQueue": { "type": "string", @@ -11231,14 +11231,14 @@ "StylePassthrough": { "shape": "SrtStylePassthrough", "locationName": "stylePassthrough", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use simplified output captions." + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use simplified output captions." } }, - "documentation": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to SRT." + "documentation": "Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video." }, "SrtStylePassthrough": { "type": "string", - "documentation": "Set Style passthrough (StylePassthrough) to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. 
Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use simplified output captions.", + "documentation": "Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use simplified output captions.", "enum": [ "ENABLED", "DISABLED" @@ -11260,7 +11260,7 @@ "StaticKeyValue": { "shape": "__stringPatternAZaZ0932", "locationName": "staticKeyValue", - "documentation": "Relates to DRM implementation. Use a 32-character hexidecimal string to specify Key Value (StaticKeyValue)." + "documentation": "Relates to DRM implementation. Use a 32-character hexidecimal string to specify Key Value." }, "Url": { "shape": "__string", @@ -11326,10 +11326,10 @@ "PageTypes": { "shape": "__listOfTeletextPageType", "locationName": "pageTypes", - "documentation": "Specify the page types for this Teletext page. If you don't specify a value here, the service sets the page type to the default value Subtitle (PAGE_TYPE_SUBTITLE). If you pass through the entire set of Teletext data, don't use this field. When you pass through a set of Teletext pages, your output has the same page types as your input." + "documentation": "Specify the page types for this Teletext page. If you don't specify a value here, the service sets the page type to the default value Subtitle. If you pass through the entire set of Teletext data, don't use this field. When you pass through a set of Teletext pages, your output has the same page types as your input." } }, - "documentation": "Settings related to teletext captions. Set up teletext captions in the same output as your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TELETEXT." + "documentation": "Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html." }, "TeletextPageType": { "type": "string", @@ -11348,7 +11348,7 @@ "PageNumber": { "shape": "__stringMin3Max3Pattern1809aFAF09aEAE", "locationName": "pageNumber", - "documentation": "Use Page Number (PageNumber) to specify the three-digit hexadecimal page number that will be used for Teletext captions. Do not use this setting if you are passing through teletext from the input source to output." + "documentation": "Use Page Number to specify the three-digit hexadecimal page number that will be used for Teletext captions. Do not use this setting if you are passing through teletext from the input source to output." } }, "documentation": "Settings specific to Teletext caption sources, including Page number." @@ -11359,24 +11359,24 @@ "FontSize": { "shape": "__integerMin10Max48", "locationName": "fontSize", - "documentation": "Use Font Size (FontSize) to set the font size of any burned-in timecode. Valid values are 10, 16, 32, 48." + "documentation": "Use Font size to set the font size of any burned-in timecode. Valid values are 10, 16, 32, 48." }, "Position": { "shape": "TimecodeBurninPosition", "locationName": "position", - "documentation": "Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to specify the location the burned-in timecode on output video." + "documentation": "Use Position under Timecode burn-in to specify the location the burned-in timecode on output video." 
}, "Prefix": { "shape": "__stringPattern", "locationName": "prefix", - "documentation": "Use Prefix (Prefix) to place ASCII characters before any burned-in timecode. For example, a prefix of \"EZ-\" will result in the timecode \"EZ-00:00:00:00\". Provide either the characters themselves or the ASCII code equivalents. The supported range of characters is 0x20 through 0x7e. This includes letters, numbers, and all special characters represented on a standard English keyboard." + "documentation": "Use Prefix to place ASCII characters before any burned-in timecode. For example, a prefix of \"EZ-\" will result in the timecode \"EZ-00:00:00:00\". Provide either the characters themselves or the ASCII code equivalents. The supported range of characters is 0x20 through 0x7e. This includes letters, numbers, and all special characters represented on a standard English keyboard." } }, "documentation": "Settings for burning the output timecode and specified prefix into the output." }, "TimecodeBurninPosition": { "type": "string", - "documentation": "Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to specify the location the burned-in timecode on output video.", + "documentation": "Use Position under Timecode burn-in to specify the location the burned-in timecode on output video.", "enum": [ "TOP_CENTER", "TOP_LEFT", @@ -11395,29 +11395,29 @@ "Anchor": { "shape": "__stringPattern010920405090509092", "locationName": "anchor", - "documentation": "If you use an editing platform that relies on an anchor timecode, use Anchor Timecode (Anchor) to specify a timecode that will match the input video frame to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior for Anchor Timecode varies depending on your setting for Source (TimecodeSource). 
* If Source (TimecodeSource) is set to Specified Start (SPECIFIEDSTART), the first input frame is the specified value in Start Timecode (Start). Anchor Timecode (Anchor) and Start Timecode (Start) are used calculate output timecode. * If Source (TimecodeSource) is set to Start at 0 (ZEROBASED) the first frame is 00:00:00:00. * If Source (TimecodeSource) is set to Embedded (EMBEDDED), the first frame is the timecode value on the first input frame of the input." + "documentation": "If you use an editing platform that relies on an anchor timecode, use Anchor Timecode to specify a timecode that will match the input video frame to the output video frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF). This setting ignores frame rate conversion. System behavior for Anchor Timecode varies depending on your setting for Source. * If Source is set to Specified Start, the first input frame is the specified value in Start Timecode. Anchor Timecode and Start Timecode are used calculate output timecode. * If Source is set to Start at 0 the first frame is 00:00:00:00. * If Source is set to Embedded, the first frame is the timecode value on the first input frame of the input." }, "Source": { "shape": "TimecodeSource", "locationName": "source", - "documentation": "Use Source (TimecodeSource) to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 (ZEROBASED) instead. * Start at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame to a value other than zero. 
You use Start timecode (Start) to provide this value." + "documentation": "Use Source to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 instead. * Start at 0 - Set the timecode of the initial frame to 00:00:00:00. * Specified Start - Set the timecode of the initial frame to a value other than zero. You use Start timecode to provide this value." }, "Start": { "shape": "__stringPattern010920405090509092", "locationName": "start", - "documentation": "Only use when you set Source (TimecodeSource) to Specified start (SPECIFIEDSTART). Use Start timecode (Start) to specify the timecode for the initial frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF)." + "documentation": "Only use when you set Source to Specified start. Use Start timecode to specify the timecode for the initial frame. Use 24-hour format with frame number, (HH:MM:SS:FF) or (HH:MM:SS;FF)." }, "TimestampOffset": { "shape": "__stringPattern0940191020191209301", "locationName": "timestampOffset", - "documentation": "Only applies to outputs that support program-date-time stamp. Use Timestamp offset (TimestampOffset) to overwrite the timecode date without affecting the time and frame number. Provide the new date as a string in the format \"yyyy-mm-dd\". To use Time stamp offset, you must also enable Insert program-date-time (InsertProgramDateTime) in the output settings. For example, if the date part of your timecodes is 2002-1-25 and you want to change it to one year later, set Timestamp offset (TimestampOffset) to 2003-1-25." 
+ "documentation": "Only applies to outputs that support program-date-time stamp. Use Timestamp offset to overwrite the timecode date without affecting the time and frame number. Provide the new date as a string in the format \"yyyy-mm-dd\". To use Timestamp offset, you must also enable Insert program-date-time in the output settings. For example, if the date part of your timecodes is 2002-1-25 and you want to change it to one year later, set Timestamp offset to 2003-1-25." } }, "documentation": "These settings control how the service handles timecodes throughout the job. These settings don't affect input clipping." }, "TimecodeSource": { "type": "string", - "documentation": "Use Source (TimecodeSource) to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded (EMBEDDED) - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 (ZEROBASED) instead. * Start at 0 (ZEROBASED) - Set the timecode of the initial frame to 00:00:00:00. * Specified Start (SPECIFIEDSTART) - Set the timecode of the initial frame to a value other than zero. You use Start timecode (Start) to provide this value.", + "documentation": "Use Source to set how timecodes are handled within this job. To make sure that your video, audio, captions, and markers are synchronized and that time-based features, such as image inserter, work correctly, choose the Timecode source option that matches your assets. All timecodes are in a 24-hour format with frame number (HH:MM:SS:FF). * Embedded - Use the timecode that is in the input video. If no embedded timecode is in the source, the service will use Start at 0 instead. 
* Start at 0 - Set the timecode of the initial frame to 00:00:00:00. * Specified Start - Set the timecode of the initial frame to a value other than zero. You use Start timecode to provide this value.", "enum": [ "EMBEDDED", "ZEROBASED", @@ -11426,7 +11426,7 @@ }, "TimedMetadata": { "type": "string", - "documentation": "Set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH) to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period (timedMetadataId3Period), and Custom ID3 metadata inserter (timedMetadataInsertion). To exclude this ID3 metadata in this output: set ID3 metadata to None (NONE) or leave blank.", + "documentation": "Set ID3 metadata to Passthrough to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period, and Custom ID3 metadata inserter. To exclude this ID3 metadata in this output: set ID3 metadata to None or leave blank.", "enum": [ "PASSTHROUGH", "NONE" @@ -11441,7 +11441,7 @@ "documentation": "Id3Insertions contains the array of Id3Insertion instances." } }, - "documentation": "Insert user-defined custom ID3 metadata (id3) at timecodes (timecode) that you specify. In each output that you want to include this metadata, you must set ID3 metadata (timedMetadata) to Passthrough (PASSTHROUGH)." + "documentation": "Insert user-defined custom ID3 metadata at timecodes that you specify. In each output that you want to include this metadata, you must set ID3 metadata to Passthrough." }, "Timing": { "type": "structure", @@ -11498,7 +11498,7 @@ "documentation": "Pass through style and position information from a TTML-like input source (TTML, IMSC, SMPTE-TT) to the TTML output." } }, - "documentation": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. 
For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to TTML." + "documentation": "Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." }, "TtmlStylePassthrough": { "type": "string", @@ -11683,7 +11683,7 @@ }, "Vc3Class": { "type": "string", - "documentation": "Specify the VC3 class to choose the quality characteristics for this output. VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) and Resolution (height and width), determine your output bitrate. For example, say that your video resolution is 1920x1080 and your framerate is 29.97. Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately 145 Mbps and Class 220 (CLASS_220) gives you and output with a bitrate of approximately 220 Mbps. VC3 class also specifies the color bit depth of your output.", + "documentation": "Specify the VC3 class to choose the quality characteristics for this output. VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) and Resolution (height and width), determine your output bitrate. For example, say that your video resolution is 1920x1080 and your framerate is 29.97. Then Class 145 gives you an output with a bitrate of approximately 145 Mbps and Class 220 gives you and output with a bitrate of approximately 220 Mbps. 
VC3 class also specifies the color bit depth of your output.", "enum": [ "CLASS_145_8BIT", "CLASS_220_8BIT", @@ -11692,7 +11692,7 @@ }, "Vc3FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -11717,7 +11717,7 @@ }, "Vc3ScanTypeConversionMode": { "type": "string", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. 
Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE).", + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive.", "enum": [ "INTERLACED", "INTERLACED_OPTIMIZE" @@ -11729,7 +11729,7 @@ "FramerateControl": { "shape": "Vc3FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. 
If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "Vc3FramerateConversionAlgorithm", @@ -11754,29 +11754,29 @@ "ScanTypeConversionMode": { "shape": "Vc3ScanTypeConversionMode", "locationName": "scanTypeConversionMode", - "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing (INTERLACED_OPTIMIZE) to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing (INTERLACED), for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. 
When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine (telecine) to None (NONE) or Soft (SOFT). You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode (interlaceMode) to a value other than Progressive (PROGRESSIVE)." + "documentation": "Use this setting for interlaced outputs, when your output frame rate is half of your input frame rate. In this situation, choose Optimized interlacing to create a better quality interlaced output. In this case, each progressive frame from the input corresponds to an interlaced field in the output. Keep the default value, Basic interlacing, for all other output frame rates. With basic interlacing, MediaConvert performs any frame rate conversion first and then interlaces the frames. When you choose Optimized interlacing and you set your output frame rate to a value that isn't suitable for optimized interlacing, MediaConvert automatically falls back to basic interlacing. Required settings: To use optimized interlacing, you must set Telecine to None or Soft. You can't use optimized interlacing for hard telecine outputs. You must also set Interlace mode to a value other than Progressive." }, "SlowPal": { "shape": "Vc3SlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." 
+ "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Framerate to 25." }, "Telecine": { "shape": "Vc3Telecine", "locationName": "telecine", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture." }, "Vc3Class": { "shape": "Vc3Class", "locationName": "vc3Class", - "documentation": "Specify the VC3 class to choose the quality characteristics for this output. VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) and Resolution (height and width), determine your output bitrate. For example, say that your video resolution is 1920x1080 and your framerate is 29.97. Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately 145 Mbps and Class 220 (CLASS_220) gives you and output with a bitrate of approximately 220 Mbps. VC3 class also specifies the color bit depth of your output." + "documentation": "Specify the VC3 class to choose the quality characteristics for this output. 
VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) and Resolution (height and width), determine your output bitrate. For example, say that your video resolution is 1920x1080 and your framerate is 29.97. Then Class 145 gives you an output with a bitrate of approximately 145 Mbps and Class 220 gives you an output with a bitrate of approximately 220 Mbps. VC3 class also specifies the color bit depth of your output." } }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VC3" + "documentation": "Required when you set Codec to the value VC3" }, "Vc3SlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Framerate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Framerate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -11784,7 +11784,7 @@ }, "Vc3Telecine": { "type": "string", - "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine (HARD) to create a smoother picture. 
When you keep the default value, None (NONE), MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", + "documentation": "When you do frame rate conversion from 23.976 frames per second (fps) to 29.97 fps, and your output scan type is interlaced, you can optionally enable hard telecine to create a smoother picture. When you keep the default value, None, MediaConvert does a standard frame rate conversion to 29.97 without doing anything with the field polarity to create a smoother picture.", "enum": [ "NONE", "HARD" @@ -11837,12 +11837,12 @@ "FrameCaptureSettings": { "shape": "FrameCaptureSettings", "locationName": "frameCaptureSettings", - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value FRAME_CAPTURE." + "documentation": "Required when you set Codec to the value FRAME_CAPTURE." }, "H264Settings": { "shape": "H264Settings", "locationName": "h264Settings", - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value H_264." + "documentation": "Required when you set Codec to the value H_264." }, "H265Settings": { "shape": "H265Settings", @@ -11852,35 +11852,35 @@ "Mpeg2Settings": { "shape": "Mpeg2Settings", "locationName": "mpeg2Settings", - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value MPEG2." + "documentation": "Required when you set Codec to the value MPEG2." }, "ProresSettings": { "shape": "ProresSettings", "locationName": "proresSettings", - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value PRORES." + "documentation": "Required when you set Codec to the value PRORES." 
}, "Vc3Settings": { "shape": "Vc3Settings", "locationName": "vc3Settings", - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VC3" + "documentation": "Required when you set Codec to the value VC3" }, "Vp8Settings": { "shape": "Vp8Settings", "locationName": "vp8Settings", - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP8." + "documentation": "Required when you set Codec to the value VP8." }, "Vp9Settings": { "shape": "Vp9Settings", "locationName": "vp9Settings", - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP9." + "documentation": "Required when you set Codec to the value VP9." }, "XavcSettings": { "shape": "XavcSettings", "locationName": "xavcSettings", - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value XAVC." + "documentation": "Required when you set Codec to the value XAVC." } }, - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings" + "documentation": "Video codec settings contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec. For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings" }, "VideoDescription": { "type": "structure", @@ -11888,7 +11888,7 @@ "AfdSignaling": { "shape": "AfdSignaling", "locationName": "afdSignaling", - "documentation": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling (AfdSignaling) to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data." + "documentation": "This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert AFD signaling to specify whether the service includes AFD values in the output video data and what those values are. * Choose None to remove all AFD values from this output. * Choose Fixed to ignore input AFD values and instead encode the value specified in the job. * Choose Auto to calculate output AFD values based on the input AFD scaler data." }, "AntiAlias": { "shape": "AntiAlias", @@ -11898,27 +11898,27 @@ "CodecSettings": { "shape": "VideoCodecSettings", "locationName": "codecSettings", - "documentation": "Video codec settings, (CodecSettings) under (VideoDescription), contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec (Codec). For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. 
* AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings" + "documentation": "Video codec settings contains the group of settings related to video encoding. The settings in this group vary depending on the value that you choose for Video codec. For each codec enum that you choose, define the corresponding settings object. The following lists the codec enum, settings object pairs. * AV1, Av1Settings * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings * XAVC, XavcSettings" }, "ColorMetadata": { "shape": "ColorMetadata", "locationName": "colorMetadata", - "documentation": "Choose Insert (INSERT) for this setting to include color metadata in this output. Choose Ignore (IGNORE) to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default." + "documentation": "Choose Insert for this setting to include color metadata in this output. Choose Ignore to exclude color metadata from this output. If you don't specify a value, the service sets this to Insert by default." }, "Crop": { "shape": "Rectangle", "locationName": "crop", - "documentation": "Use Cropping selection (crop) to specify the video area that the service will include in the output video frame." + "documentation": "Use Cropping selection to specify the video area that the service will include in the output video frame." }, "DropFrameTimecode": { "shape": "DropFrameTimecode", "locationName": "dropFrameTimecode", - "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. 
If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion (TimecodeInsertion) is enabled." + "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled." }, "FixedAfd": { "shape": "__integerMin0Max15", "locationName": "fixedAfd", - "documentation": "Applies only if you set AFD Signaling(AfdSignaling) to Fixed (FIXED). Use Fixed (FixedAfd) to specify a four-bit AFD value which the service will write on all frames of this video output." + "documentation": "Applies only if you set AFD Signaling to Fixed. Use Fixed to specify a four-bit AFD value which the service will write on all frames of this video output." }, "Height": { "shape": "__integerMin32Max8192", @@ -11928,32 +11928,32 @@ "Position": { "shape": "Rectangle", "locationName": "position", - "documentation": "Use Selection placement (position) to define the video area in your output frame. The area outside of the rectangle that you specify here is black." + "documentation": "Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black." }, "RespondToAfd": { "shape": "RespondToAfd", "locationName": "respondToAfd", - "documentation": "Use Respond to AFD (RespondToAfd) to specify how the service changes the video itself in response to AFD values in the input. * Choose Respond to clip the input video frame according to the AFD value, input display aspect ratio, and output display aspect ratio. * Choose Passthrough to include the input AFD values. Do not choose this when AfdSignaling is set to (NONE). A preferred implementation of this workflow is to set RespondToAfd to (NONE) and set AfdSignaling to (AUTO). 
* Choose None to remove all input AFD values from this output." + "documentation": "Use Respond to AFD to specify how the service changes the video itself in response to AFD values in the input. * Choose Respond to clip the input video frame according to the AFD value, input display aspect ratio, and output display aspect ratio. * Choose Passthrough to include the input AFD values. Do not choose this when AfdSignaling is set to NONE. A preferred implementation of this workflow is to set RespondToAfd to None and set AfdSignaling to AUTO. * Choose None to remove all input AFD values from this output." }, "ScalingBehavior": { "shape": "ScalingBehavior", "locationName": "scalingBehavior", - "documentation": "Specify how the service handles outputs that have a different aspect ratio from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) to have the service stretch your video image to fit. Keep the setting Default (DEFAULT) to have the service letterbox your video instead. This setting overrides any value that you specify for the setting Selection placement (position) in this output." + "documentation": "Specify how the service handles outputs that have a different aspect ratio from the input aspect ratio. Choose Stretch to output to have the service stretch your video image to fit. Keep the setting Default to have the service letterbox your video instead. This setting overrides any value that you specify for the setting Selection placement in this output." }, "Sharpness": { "shape": "__integerMin0Max100", "locationName": "sharpness", - "documentation": "Use Sharpness (Sharpness) setting to specify the strength of anti-aliasing. This setting changes the width of the anti-alias filter kernel used for scaling. Sharpness only applies if your output resolution is different from your input resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended for most content." 
+ "documentation": "Use Sharpness setting to specify the strength of anti-aliasing. This setting changes the width of the anti-alias filter kernel used for scaling. Sharpness only applies if your output resolution is different from your input resolution. 0 is the softest setting, 100 the sharpest, and 50 recommended for most content." }, "TimecodeInsertion": { "shape": "VideoTimecodeInsertion", "locationName": "timecodeInsertion", - "documentation": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration (TimecodeConfig). In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings (InputTimecodeSource) does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration (TimecodeSource) does." + "documentation": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration. 
In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration does." }, "VideoPreprocessors": { "shape": "VideoPreprocessor", "locationName": "videoPreprocessors", - "documentation": "Find additional transcoding features under Preprocessors (VideoPreprocessors). Enable the features at each output individually. These features are disabled by default." + "documentation": "Find additional transcoding features under Preprocessors. Enable the features at each output individually. These features are disabled by default." }, "Width": { "shape": "__integerMin32Max8192", @@ -11961,7 +11961,7 @@ "documentation": "Use Width to define the video resolution width, in pixels, for this output. To use the same resolution as your input: Leave both Width and Height blank. To evenly scale from your input resolution: Leave Width blank and enter a value for Height. For example, if your input is 1920x1080 and you set Height to 720, your output will be 1280x720." } }, - "documentation": "Settings related to video encoding of your output. The specific video settings depend on the video codec that you choose. When you work directly in your JSON job specification, include one instance of Video description (VideoDescription) per output." + "documentation": "Settings related to video encoding of your output. The specific video settings depend on the video codec that you choose." }, "VideoDetail": { "type": "structure", @@ -12005,7 +12005,7 @@ "ImageInserter": { "shape": "ImageInserter", "locationName": "imageInserter", - "documentation": "Enable the Image inserter (ImageInserter) feature to include a graphic overlay on your video. Enable or disable this feature for each output individually. This setting is disabled by default." 
+ "documentation": "Enable the Image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each output individually. This setting is disabled by default." }, "NoiseReducer": { "shape": "NoiseReducer", @@ -12023,7 +12023,7 @@ "documentation": "Settings for burning the output timecode and specified prefix into the output." } }, - "documentation": "Find additional transcoding features under Preprocessors (VideoPreprocessors). Enable the features at each output individually. These features are disabled by default." + "documentation": "Find additional transcoding features under Preprocessors. Enable the features at each output individually. These features are disabled by default." }, "VideoSelector": { "type": "structure", @@ -12041,27 +12041,27 @@ "ColorSpaceUsage": { "shape": "ColorSpaceUsage", "locationName": "colorSpaceUsage", - "documentation": "There are two sources for color metadata, the input file and the job input settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata). The Color space usage setting determines which takes precedence. Choose Force (FORCE) to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. FALLBACK - Choose Fallback (FALLBACK) to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings." + "documentation": "There are two sources for color metadata, the input file and the job input settings Color space and HDR master display information settings. The Color space usage setting determines which takes precedence. Choose Force to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. 
FALLBACK - Choose Fallback to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings." }, "EmbeddedTimecodeOverride": { "shape": "EmbeddedTimecodeOverride", "locationName": "embeddedTimecodeOverride", - "documentation": "Set Embedded timecode override (embeddedTimecodeOverride) to Use MDPM (USE_MDPM) when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata (MDPM). When you do, we recommend you also set Timecode source (inputTimecodeSource) to Embedded (EMBEDDED). Leave Embedded timecode override blank, or set to None (NONE), when your input does not contain MDPM timecode." + "documentation": "Set Embedded timecode override to Use MDPM when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata. When you do, we recommend you also set Timecode source to Embedded. Leave Embedded timecode override blank, or set to None, when your input does not contain MDPM timecode." }, "Hdr10Metadata": { "shape": "Hdr10Metadata", "locationName": "hdr10Metadata", - "documentation": "Use these settings to provide HDR 10 metadata that is missing or inaccurate in your input video. Appropriate values vary depending on the input video and must be provided by a color grader. The color grader generates these values during the HDR 10 mastering process. The valid range for each of these settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color coordinate. Related settings - When you specify these values, you must also set Color space (ColorSpace) to HDR 10 (HDR10). To specify whether the the values you specify here take precedence over the values in the metadata of your input file, set Color space usage (ColorSpaceUsage). To specify whether color metadata is included in an output, set Color metadata (ColorMetadata). 
For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr." + "documentation": "Use these settings to provide HDR 10 metadata that is missing or inaccurate in your input video. Appropriate values vary depending on the input video and must be provided by a color grader. The color grader generates these values during the HDR 10 mastering process. The valid range for each of these settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color coordinate. Related settings - When you specify these values, you must also set Color space to HDR 10. To specify whether the values you specify here take precedence over the values in the metadata of your input file, set Color space usage. To specify whether color metadata is included in an output, set Color metadata. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr." }, "PadVideo": { "shape": "PadVideo", "locationName": "padVideo", - "documentation": "Use this setting if your input has video and audio durations that don't align, and your output or player has strict alignment requirements. Examples: Input audio track has a delayed start. Input video track ends before audio ends. When you set Pad video (padVideo) to Black (BLACK), MediaConvert generates black video frames so that output video and audio durations match. Black video frames are added at the beginning or end, depending on your input. To keep the default behavior and not generate black video, set Pad video to Disabled or leave blank." + "documentation": "Use this setting if your input has video and audio durations that don't align, and your output or player has strict alignment requirements. Examples: Input audio track has a delayed start. Input video track ends before audio ends. When you set Pad video to Black, MediaConvert generates black video frames so that output video and audio durations match. 
Black video frames are added at the beginning or end, depending on your input. To keep the default behavior and not generate black video, set Pad video to Disabled or leave blank." }, "Pid": { "shape": "__integerMin1Max2147483647", "locationName": "pid", - "documentation": "Use PID (Pid) to select specific video data from an input file. Specify this value as an integer; the system automatically converts it to the hexidecimal value. For example, 257 selects PID 0x101. A PID, or packet identifier, is an identifier for a set of data in an MPEG-2 transport stream container." + "documentation": "Use PID to select specific video data from an input file. Specify this value as an integer; the system automatically converts it to the hexadecimal value. For example, 257 selects PID 0x101. A PID, or packet identifier, is an identifier for a set of data in an MPEG-2 transport stream container." }, "ProgramNumber": { "shape": "__integerMinNegative2147483648Max2147483647", @@ -12071,19 +12071,19 @@ "Rotate": { "shape": "InputRotate", "locationName": "rotate", - "documentation": "Use Rotate (InputRotate) to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata." + "documentation": "Use Rotate to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. 
If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata." }, "SampleRange": { "shape": "InputSampleRange", "locationName": "sampleRange", - "documentation": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow (FOLLOW), for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata." + "documentation": "If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow, for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata." } }, "documentation": "Input video selectors contain the video settings for the input. 
Each of your inputs can have up to one video selector." }, "VideoTimecodeInsertion": { "type": "string", - "documentation": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration (TimecodeConfig). In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings (InputTimecodeSource) does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration (TimecodeSource) does.", + "documentation": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration. In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings does not affect the timecodes that are inserted in the output. 
Source under Job settings > Timecode configuration does.", "enum": [ "DISABLED", "PIC_TIMING_SEI" @@ -12112,7 +12112,7 @@ }, "Vp8FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -12129,7 +12129,7 @@ }, "Vp8ParControl": { "type": "string", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. 
When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -12137,7 +12137,7 @@ }, "Vp8QualityTuningLevel": { "type": "string", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", "enum": [ "MULTI_PASS", "MULTI_PASS_HQ" @@ -12161,7 +12161,7 @@ "FramerateControl": { "shape": "Vp8FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. 
Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "Vp8FramerateConversionAlgorithm", @@ -12196,22 +12196,22 @@ "ParControl": { "shape": "Vp8ParControl", "locationName": "parControl", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings." }, "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. 
When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "Vp8QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." + "documentation": "Optional. 
Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." }, "RateControlMode": { "shape": "Vp8RateControlMode", @@ -12219,11 +12219,11 @@ "documentation": "With the VP8 codec, you can use only the variable bitrate (VBR) rate control mode." } }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP8." + "documentation": "Required when you set Codec to the value VP8." }, "Vp9FramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. 
If you choose Custom, specify your frame rate as a fraction.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -12240,7 +12240,7 @@ }, "Vp9ParControl": { "type": "string", - "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. To specify a different PAR by editing the JSON job specification, choose SPECIFIED. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", + "documentation": "Optional. Specify how the service determines the pixel aspect ratio (PAR) for this output. The default behavior, Follow source, uses the PAR from your input video for your output. To specify a different PAR in the console, choose any value other than Follow source. When you choose SPECIFIED for this setting, you must also specify values for the parNumerator and parDenominator settings.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -12248,7 +12248,7 @@ }, "Vp9QualityTuningLevel": { "type": "string", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding.", "enum": [ "MULTI_PASS", "MULTI_PASS_HQ" @@ -12272,7 +12272,7 @@ "FramerateControl": { "shape": "Vp9FramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. 
If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate you specify in the settings FramerateNumerator and FramerateDenominator." + "documentation": "If you are using the console, use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction." }, "FramerateConversionAlgorithm": { "shape": "Vp9FramerateConversionAlgorithm", @@ -12312,17 +12312,17 @@ "ParDenominator": { "shape": "__integerMin1Max2147483647", "locationName": "parDenominator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. 
When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33." }, "ParNumerator": { "shape": "__integerMin1Max2147483647", "locationName": "parNumerator", - "documentation": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." + "documentation": "Required when you set Pixel aspect ratio to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40." }, "QualityTuningLevel": { "shape": "Vp9QualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, multi-pass encoding." }, "RateControlMode": { "shape": "Vp9RateControlMode", @@ -12330,7 +12330,7 @@ "documentation": "With the VP9 codec, you can use only the variable bitrate (VBR) rate control mode." } }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value VP9." 
+ "documentation": "Required when you set Codec to the value VP9." }, "WarningGroup": { "type": "structure", @@ -12377,7 +12377,7 @@ "BitDepth": { "shape": "__integerMin16Max24", "locationName": "bitDepth", - "documentation": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." + "documentation": "Specify Bit depth, in bits per sample, to choose the encoding quality for this audio track." }, "Channels": { "shape": "__integerMin1Max64", @@ -12395,7 +12395,7 @@ "documentation": "Sample rate in Hz." } }, - "documentation": "Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to the value WAV." + "documentation": "Required when you set Codec to the value WAV." }, "WebvttAccessibilitySubs": { "type": "string", @@ -12416,10 +12416,10 @@ "StylePassthrough": { "shape": "WebvttStylePassthrough", "locationName": "stylePassthrough", - "documentation": "To use the available style, color, and position information from your input captions: Set Style passthrough (stylePassthrough) to Enabled (ENABLED). MediaConvert uses default settings when style and position information is missing from your input captions. To recreate the input captions exactly: Set Style passthrough to Strict (STRICT). MediaConvert automatically applies timing adjustments, including adjustments for frame rate conversion, ad avails, and input clipping. Your input captions format must be WebVTT. To ignore the style and position information from your input captions and use simplified output captions: Set Style passthrough to Disabled (DISABLED), or leave blank." + "documentation": "To use the available style, color, and position information from your input captions: Set Style passthrough to Enabled. MediaConvert uses default settings when style and position information is missing from your input captions. To recreate the input captions exactly: Set Style passthrough to Strict. 
MediaConvert automatically applies timing adjustments, including adjustments for frame rate conversion, ad avails, and input clipping. Your input captions format must be WebVTT. To ignore the style and position information from your input captions and use simplified output captions: Set Style passthrough to Disabled, or leave blank." } }, - "documentation": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html. When you work directly in your JSON job specification, include this object and any required children when you set destinationType to WebVTT." + "documentation": "Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html." }, "WebvttHlsSourceSettings": { "type": "structure", @@ -12444,7 +12444,7 @@ }, "WebvttStylePassthrough": { "type": "string", - "documentation": "To use the available style, color, and position information from your input captions: Set Style passthrough (stylePassthrough) to Enabled (ENABLED). MediaConvert uses default settings when style and position information is missing from your input captions. To recreate the input captions exactly: Set Style passthrough to Strict (STRICT). MediaConvert automatically applies timing adjustments, including adjustments for frame rate conversion, ad avails, and input clipping. Your input captions format must be WebVTT. 
To ignore the style and position information from your input captions and use simplified output captions: Set Style passthrough to Disabled (DISABLED), or leave blank.", + "documentation": "To use the available style, color, and position information from your input captions: Set Style passthrough to Enabled. MediaConvert uses default settings when style and position information is missing from your input captions. To recreate the input captions exactly: Set Style passthrough to Strict. MediaConvert automatically applies timing adjustments, including adjustments for frame rate conversion, ad avails, and input clipping. Your input captions format must be WebVTT. To ignore the style and position information from your input captions and use simplified output captions: Set Style passthrough to Disabled, or leave blank.", "enum": [ "ENABLED", "DISABLED", @@ -12469,7 +12469,7 @@ "documentation": "Specify the XAVC Intra 4k (CBG) Class to set the bitrate of your output. Outputs of the same class have similar image quality over the operating points that are valid for that class." } }, - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K_INTRA_CBG." + "documentation": "Required when you set Profile to the value XAVC_4K_INTRA_CBG." }, "Xavc4kIntraVbrProfileClass": { "type": "string", @@ -12489,7 +12489,7 @@ "documentation": "Specify the XAVC Intra 4k (VBR) Class to set the bitrate of your output. Outputs of the same class have similar image quality over the operating points that are valid for that class." } }, - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K_INTRA_VBR." + "documentation": "Required when you set Profile to the value XAVC_4K_INTRA_VBR." }, "Xavc4kProfileBitrateClass": { "type": "string", @@ -12510,7 +12510,7 @@ }, "Xavc4kProfileQualityTuningLevel": { "type": "string", - "documentation": "Optional. 
Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "SINGLE_PASS_HQ", @@ -12533,12 +12533,12 @@ "FlickerAdaptiveQuantization": { "shape": "XavcFlickerAdaptiveQuantization", "locationName": "flickerAdaptiveQuantization", - "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization (adaptiveQuantization) to a value other than Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides." + "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. 
Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization to a value other than Off or Auto. Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides." }, "GopBReference": { "shape": "XavcGopBReference", "locationName": "gopBReference", - "documentation": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames as reference frames. Choose Don't allow (DISABLED) to prevent the encoder from using B-frames as reference frames." + "documentation": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow to allow the encoder to use B-frames as reference frames. Choose Don't allow to prevent the encoder from using B-frames as reference frames." }, "GopClosedCadence": { "shape": "__integerMin0Max2147483647", @@ -12553,7 +12553,7 @@ "QualityTuningLevel": { "shape": "Xavc4kProfileQualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. 
The default behavior is faster, lower quality, single-pass encoding." }, "Slices": { "shape": "__integerMin8Max12", @@ -12561,11 +12561,11 @@ "documentation": "Number of slices per picture. Must be less than or equal to the number of macroblock rows for progressive pictures, and less than or equal to half the number of macroblock rows for interlaced pictures." } }, - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K." + "documentation": "Required when you set Profile to the value XAVC_4K." }, "XavcAdaptiveQuantization": { "type": "string", - "documentation": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off (OFF). Related settings: The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization).", + "documentation": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set Adaptive quantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. 
Related settings: The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization, and Temporal adaptive quantization.", "enum": [ "OFF", "AUTO", @@ -12587,7 +12587,7 @@ }, "XavcFlickerAdaptiveQuantization": { "type": "string", - "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization (adaptiveQuantization) to a value other than Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides.", + "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. 
I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization to a value other than Off or Auto. Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides.", "enum": [ "DISABLED", "ENABLED" @@ -12595,7 +12595,7 @@ }, "XavcFramerateControl": { "type": "string", - "documentation": "If you are using the console, use the Frame rate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list. The framerates shown in the dropdown list are decimal approximations of fractions. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate that you specify in the settings FramerateNumerator and FramerateDenominator.", + "documentation": "If you are using the console, use the Frame rate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list. 
The framerates shown in the dropdown list are decimal approximations of fractions.", "enum": [ "INITIALIZE_FROM_SOURCE", "SPECIFIED" @@ -12612,7 +12612,7 @@ }, "XavcGopBReference": { "type": "string", - "documentation": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames as reference frames. Choose Don't allow (DISABLED) to prevent the encoder from using B-frames as reference frames.", + "documentation": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow to allow the encoder to use B-frames as reference frames. Choose Don't allow to prevent the encoder from using B-frames as reference frames.", "enum": [ "DISABLED", "ENABLED" @@ -12636,7 +12636,7 @@ "documentation": "Specify the XAVC Intra HD (CBG) Class to set the bitrate of your output. Outputs of the same class have similar image quality over the operating points that are valid for that class." } }, - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD_INTRA_CBG." + "documentation": "Required when you set Profile to the value XAVC_HD_INTRA_CBG." }, "XavcHdProfileBitrateClass": { "type": "string", @@ -12649,7 +12649,7 @@ }, "XavcHdProfileQualityTuningLevel": { "type": "string", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding.", + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. 
The default behavior is faster, lower quality, single-pass encoding.", "enum": [ "SINGLE_PASS", "SINGLE_PASS_HQ", @@ -12667,12 +12667,12 @@ "FlickerAdaptiveQuantization": { "shape": "XavcFlickerAdaptiveQuantization", "locationName": "flickerAdaptiveQuantization", - "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (XavcAdaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization (adaptiveQuantization) to a value other than Off (OFF) or Auto (AUTO). Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides." + "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears as a visual flicker that can arise when the encoder saves bits by copying some macroblocks many times from frame to frame, and then refreshes them at the I-frame. 
When you enable this setting, the encoder updates these macroblocks slightly more often to smooth out the flicker. This setting is disabled by default. Related setting: In addition to enabling this setting, you must also set Adaptive quantization to a value other than Off or Auto. Use Adaptive quantization to adjust the degree of smoothing that Flicker adaptive quantization provides." }, "GopBReference": { "shape": "XavcGopBReference", "locationName": "gopBReference", - "documentation": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow (ENABLED) to allow the encoder to use B-frames as reference frames. Choose Don't allow (DISABLED) to prevent the encoder from using B-frames as reference frames." + "documentation": "Specify whether the encoder uses B-frames as reference frames for other pictures in the same GOP. Choose Allow to allow the encoder to use B-frames as reference frames. Choose Don't allow to prevent the encoder from using B-frames as reference frames." }, "GopClosedCadence": { "shape": "__integerMin0Max2147483647", @@ -12687,12 +12687,12 @@ "InterlaceMode": { "shape": "XavcInterlaceMode", "locationName": "interlaceMode", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. 
If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose." }, "QualityTuningLevel": { "shape": "XavcHdProfileQualityTuningLevel", "locationName": "qualityTuningLevel", - "documentation": "Optional. Use Quality tuning level (qualityTuningLevel) to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." + "documentation": "Optional. Use Quality tuning level to choose how you want to trade off encoding speed for output video quality. The default behavior is faster, lower quality, single-pass encoding." }, "Slices": { "shape": "__integerMin4Max12", @@ -12702,14 +12702,14 @@ "Telecine": { "shape": "XavcHdProfileTelecine", "locationName": "telecine", - "documentation": "Ignore this setting unless you set Frame rate (framerateNumerator divided by framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard (HARD). Otherwise, keep the default value None (NONE). For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html." 
+ "documentation": "Ignore this setting unless you set Frame rate (framerateNumerator divided by framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard. Otherwise, keep the default value None. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html." } }, - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD." + "documentation": "Required when you set Profile to the value XAVC_HD." }, "XavcHdProfileTelecine": { "type": "string", - "documentation": "Ignore this setting unless you set Frame rate (framerateNumerator divided by framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard (HARD). Otherwise, keep the default value None (NONE). For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html.", + "documentation": "Ignore this setting unless you set Frame rate (framerateNumerator divided by framerateDenominator) to 29.970. If your input framerate is 23.976, choose Hard. Otherwise, keep the default value None. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/working-with-telecine-and-inverse-telecine.html.", "enum": [ "NONE", "HARD" @@ -12717,7 +12717,7 @@ }, "XavcInterlaceMode": { "type": "string", - "documentation": "Choose the scan line type for the output. Keep the default value, Progressive (PROGRESSIVE) to create a progressive output, regardless of the scan type of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) to create an output that's interlaced with the same field polarity throughout. Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. 
Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", + "documentation": "Choose the scan line type for the output. Keep the default value, Progressive to create a progressive output, regardless of the scan type of your input. Use Top field first or Bottom field first to create an output that's interlaced with the same field polarity throughout. Use Follow, default top or Follow, default bottom to produce outputs with the same field polarity as the source. For jobs that have multiple inputs, the output field polarity might change over the course of the output. Follow behavior depends on the input scan type. If the source is interlaced, the output will be interlaced with the same polarity as the source. If the source is progressive, the output will be interlaced with top field bottom field first, depending on which of the Follow options you choose.", "enum": [ "PROGRESSIVE", "TOP_FIELD", @@ -12743,7 +12743,7 @@ "AdaptiveQuantization": { "shape": "XavcAdaptiveQuantization", "locationName": "adaptiveQuantization", - "documentation": "Keep the default value, Auto (AUTO), for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set Adaptive quantization (adaptiveQuantization) to a value other than Auto (AUTO). Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off (OFF). 
Related settings: The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization)." + "documentation": "Keep the default value, Auto, for this setting to have MediaConvert automatically apply the best types of quantization for your video content. When you want to apply your quantization settings manually, you must set Adaptive quantization to a value other than Auto. Use this setting to specify the strength of any adaptive quantization filters that you enable. If you don't want MediaConvert to do any adaptive quantization in this transcode, set Adaptive quantization to Off. Related settings: The value that you choose here applies to the following settings: Flicker adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization, and Temporal adaptive quantization." }, "EntropyEncoding": { "shape": "XavcEntropyEncoding", @@ -12753,7 +12753,7 @@ "FramerateControl": { "shape": "XavcFramerateControl", "locationName": "framerateControl", - "documentation": "If you are using the console, use the Frame rate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list. The framerates shown in the dropdown list are decimal approximations of fractions. If you are creating your transcoding job specification as a JSON file without the console, use FramerateControl to specify which value the service uses for the frame rate for this output. Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate from the input. Choose SPECIFIED if you want the service to use the frame rate that you specify in the settings FramerateNumerator and FramerateDenominator." 
+ "documentation": "If you are using the console, use the Frame rate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list. The framerates shown in the dropdown list are decimal approximations of fractions." }, "FramerateConversionAlgorithm": { "shape": "XavcFramerateConversionAlgorithm", @@ -12778,54 +12778,54 @@ "SlowPal": { "shape": "XavcSlowPal", "locationName": "slowPal", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Frame rate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1." + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Frame rate to 25." }, "Softness": { "shape": "__integerMin0Max128", "locationName": "softness", - "documentation": "Ignore this setting unless your downstream workflow requires that you specify it explicitly. Otherwise, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). The Softness (softness) setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matricies from the H.264 specification. 
Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video." + "documentation": "Ignore this setting unless your downstream workflow requires that you specify it explicitly. Otherwise, we recommend that you adjust the softness of your output by using a lower value for the setting Sharpness or by enabling a noise reducer filter. The Softness setting specifies the quantization matrices that the encoder uses. Keep the default value, 0, for flat quantization. Choose the value 1 or 16 to use the default JVT softening quantization matricies from the H.264 specification. Choose a value from 17 to 128 to use planar interpolation. Increasing values from 17 to 128 result in increasing reduction of high-frequency data. The value 128 results in the softest video." }, "SpatialAdaptiveQuantization": { "shape": "XavcSpatialAdaptiveQuantization", "locationName": "spatialAdaptiveQuantization", - "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. 
Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher." + "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. 
For content with a wider variety of textures, set it to High or Higher." }, "TemporalAdaptiveQuantization": { "shape": "XavcTemporalAdaptiveQuantization", "locationName": "temporalAdaptiveQuantization", - "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal adaptive quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization)." + "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. 
For this setting, keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal adaptive quantization, adjust the strength of the filter with the setting Adaptive quantization." }, "Xavc4kIntraCbgProfileSettings": { "shape": "Xavc4kIntraCbgProfileSettings", "locationName": "xavc4kIntraCbgProfileSettings", - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K_INTRA_CBG." + "documentation": "Required when you set Profile to the value XAVC_4K_INTRA_CBG." }, "Xavc4kIntraVbrProfileSettings": { "shape": "Xavc4kIntraVbrProfileSettings", "locationName": "xavc4kIntraVbrProfileSettings", - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K_INTRA_VBR." + "documentation": "Required when you set Profile to the value XAVC_4K_INTRA_VBR." }, "Xavc4kProfileSettings": { "shape": "Xavc4kProfileSettings", "locationName": "xavc4kProfileSettings", - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_4K." + "documentation": "Required when you set Profile to the value XAVC_4K." 
}, "XavcHdIntraCbgProfileSettings": { "shape": "XavcHdIntraCbgProfileSettings", "locationName": "xavcHdIntraCbgProfileSettings", - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD_INTRA_CBG." + "documentation": "Required when you set Profile to the value XAVC_HD_INTRA_CBG." }, "XavcHdProfileSettings": { "shape": "XavcHdProfileSettings", "locationName": "xavcHdProfileSettings", - "documentation": "Required when you set (Profile) under (VideoDescription)>(CodecSettings)>(XavcSettings) to the value XAVC_HD." + "documentation": "Required when you set Profile to the value XAVC_HD." } }, - "documentation": "Required when you set (Codec) under (VideoDescription)>(CodecSettings) to the value XAVC." + "documentation": "Required when you set Codec to the value XAVC." }, "XavcSlowPal": { "type": "string", - "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Frame rate to 25. In your JSON job specification, set (framerateControl) to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to 1.", + "documentation": "Ignore this setting unless your input frame rate is 23.976 or 24 frames per second (fps). Enable slow PAL to create a 25 fps output by relabeling the video frames and resampling your audio. Note that enabling this setting will slightly reduce the duration of your video. Related settings: You must also set Frame rate to 25.", "enum": [ "DISABLED", "ENABLED" @@ -12833,7 +12833,7 @@ }, "XavcSpatialAdaptiveQuantization": { "type": "string", - "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). 
When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on spatial variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", + "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled, to adjust quantization within each frame based on spatial variation of content complexity. 
When you enable this feature, the encoder uses fewer bits on areas that can sustain more distortion with no noticeable visual degradation and uses more bits on areas where any small distortion will be noticeable. For example, complex textured blocks are encoded with fewer bits and smooth textured blocks are encoded with more bits. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen with a lot of complex texture, you might choose to disable this feature. Related setting: When you enable spatial adaptive quantization, set the value for Adaptive quantization depending on your content. For homogeneous content, such as cartoons and video games, set it to Low. For content with a wider variety of textures, set it to High or Higher.", "enum": [ "DISABLED", "ENABLED" @@ -12841,7 +12841,7 @@ }, "XavcTemporalAdaptiveQuantization": { "type": "string", - "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto (AUTO), for the setting Adaptive quantization (adaptiveQuantization). When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled (ENABLED), to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. 
Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. Related setting: When you enable temporal adaptive quantization, adjust the strength of the filter with the setting Adaptive quantization (adaptiveQuantization).", + "documentation": "The best way to set up adaptive quantization is to keep the default value, Auto, for the setting Adaptive quantization. When you do so, MediaConvert automatically applies the best types of quantization for your video content. Include this setting in your JSON job specification only when you choose to change the default value for Adaptive quantization. For this setting, keep the default value, Enabled, to adjust quantization within each frame based on temporal variation of content complexity. When you enable this feature, the encoder uses fewer bits on areas of the frame that aren't moving and uses more bits on complex objects with sharp edges that move a lot. For example, this feature improves the readability of text tickers on newscasts and scoreboards on sports matches. Enabling this feature will almost always improve your video quality. Note, though, that this feature doesn't take into account where the viewer's attention is likely to be. If viewers are likely to be focusing their attention on a part of the screen that doesn't have moving objects with sharp edges, such as sports athletes' faces, you might choose to disable this feature. 
Related setting: When you enable temporal adaptive quantization, adjust the strength of the filter with the setting Adaptive quantization.", "enum": [ "DISABLED", "ENABLED" @@ -13873,4 +13873,4 @@ } }, "documentation": "AWS Elemental MediaConvert" -} \ No newline at end of file +} diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index 17c8c05491e..c2b7f194f31 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 medialive diff --git a/services/medialive/src/main/resources/codegen-resources/service-2.json b/services/medialive/src/main/resources/codegen-resources/service-2.json index a82d641f438..12e6a1f6179 100644 --- a/services/medialive/src/main/resources/codegen-resources/service-2.json +++ b/services/medialive/src/main/resources/codegen-resources/service-2.json @@ -6137,10 +6137,15 @@ "locationName": "uhdDeviceSettings", "documentation": "Settings that describe an input device that is type UHD." }, - "Tags": { + "Tags": { "shape": "Tags", "locationName": "tags", "documentation": "A collection of key-value pairs." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone associated with this input device." } }, "documentation": "Placeholder documentation for DescribeInputDeviceResponse" @@ -9788,10 +9793,15 @@ "locationName": "uhdDeviceSettings", "documentation": "Settings that describe an input device that is type UHD." }, - "Tags": { + "Tags": { "shape": "Tags", "locationName": "tags", "documentation": "A collection of key-value pairs." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone associated with this input device." } }, "documentation": "An input device." 
@@ -10049,10 +10059,15 @@ "locationName": "uhdDeviceSettings", "documentation": "Settings that describe an input device that is type UHD." }, - "Tags": { + "Tags": { "shape": "Tags", "locationName": "tags", "documentation": "A collection of key-value pairs." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone associated with this input device." } }, "documentation": "Details of the input device." @@ -15721,6 +15736,11 @@ "shape": "InputDeviceConfigurableSettings", "locationName": "uhdDeviceSettings", "documentation": "The settings that you want to apply to the UHD input device." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone you want associated with this input device." } }, "documentation": "Updates an input device." @@ -15748,6 +15768,11 @@ "shape": "InputDeviceConfigurableSettings", "locationName": "uhdDeviceSettings", "documentation": "The settings that you want to apply to the UHD input device." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone you want associated with this input device." } }, "documentation": "A request to update an input device.", @@ -15818,10 +15843,15 @@ "locationName": "uhdDeviceSettings", "documentation": "Settings that describe an input device that is type UHD." }, - "Tags": { + "Tags": { "shape": "Tags", "locationName": "tags", "documentation": "A collection of key-value pairs." + }, + "AvailabilityZone": { + "shape": "__string", + "locationName": "availabilityZone", + "documentation": "The Availability Zone associated with this input device." 
} }, "documentation": "Placeholder documentation for UpdateInputDeviceResponse" diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 36e77f2df84..140674caa34 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackagev2/pom.xml b/services/mediapackagev2/pom.xml index 26b62f13676..16cdfacabf5 100644 --- a/services/mediapackagev2/pom.xml +++ b/services/mediapackagev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT mediapackagev2 AWS Java SDK :: Services :: Media Package V2 diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index f02c8ecc99f..9647ed42f00 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index 1e1425ce70d..ba5ffed496c 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index 3e5e81a0a21..ca5c667ee50 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index 094a47aab31..ba3f32adef0 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git 
a/services/medicalimaging/pom.xml b/services/medicalimaging/pom.xml index ff3035e3d7a..5ebe30ed9f6 100644 --- a/services/medicalimaging/pom.xml +++ b/services/medicalimaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT medicalimaging AWS Java SDK :: Services :: Medical Imaging diff --git a/services/memorydb/pom.xml b/services/memorydb/pom.xml index 270d2e3c472..509d976f4b5 100644 --- a/services/memorydb/pom.xml +++ b/services/memorydb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT memorydb AWS Java SDK :: Services :: Memory DB diff --git a/services/mgn/pom.xml b/services/mgn/pom.xml index 74987b9dfa5..829c6e76179 100644 --- a/services/mgn/pom.xml +++ b/services/mgn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT mgn AWS Java SDK :: Services :: Mgn diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index 0c944663c87..07917103ac0 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index 88fac38a5f0..bf216186376 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/migrationhuborchestrator/pom.xml b/services/migrationhuborchestrator/pom.xml index b10c7e7bf60..472aeedcb6a 100644 --- a/services/migrationhuborchestrator/pom.xml +++ b/services/migrationhuborchestrator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT migrationhuborchestrator AWS Java SDK :: Services :: Migration Hub Orchestrator diff --git 
a/services/migrationhubrefactorspaces/pom.xml b/services/migrationhubrefactorspaces/pom.xml index a90a54e16aa..a08bb023952 100644 --- a/services/migrationhubrefactorspaces/pom.xml +++ b/services/migrationhubrefactorspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT migrationhubrefactorspaces AWS Java SDK :: Services :: Migration Hub Refactor Spaces diff --git a/services/migrationhubstrategy/pom.xml b/services/migrationhubstrategy/pom.xml index af47639b3df..bab3418a884 100644 --- a/services/migrationhubstrategy/pom.xml +++ b/services/migrationhubstrategy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT migrationhubstrategy AWS Java SDK :: Services :: Migration Hub Strategy diff --git a/services/mobile/pom.xml b/services/mobile/pom.xml index 09d84088a5c..b6c4e7f6612 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 mobile diff --git a/services/mq/pom.xml b/services/mq/pom.xml index e7c6c7a76f4..606a519775d 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 mq diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index 736ba5facd7..2608820fd5e 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index b375d694da4..67b54ab349a 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index a449a093e4f..2f77db6b822 100644 --- 
a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index 8d87e920125..e4f0b6981b0 100644 --- a/services/networkfirewall/pom.xml +++ b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index 8dfdc89bf71..9664a137455 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/nimble/pom.xml b/services/nimble/pom.xml index 37763703551..b94f402c267 100644 --- a/services/nimble/pom.xml +++ b/services/nimble/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT nimble AWS Java SDK :: Services :: Nimble diff --git a/services/oam/pom.xml b/services/oam/pom.xml index fca1271a882..b596e7b8b4b 100644 --- a/services/oam/pom.xml +++ b/services/oam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT oam AWS Java SDK :: Services :: OAM diff --git a/services/omics/pom.xml b/services/omics/pom.xml index 3d4ae275202..988db696927 100644 --- a/services/omics/pom.xml +++ b/services/omics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT omics AWS Java SDK :: Services :: Omics diff --git a/services/omics/src/main/resources/codegen-resources/endpoint-tests.json b/services/omics/src/main/resources/codegen-resources/endpoint-tests.json index 8ff05e264f8..ce988072093 100644 --- 
a/services/omics/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/omics/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,55 +1,55 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://omics-fips.us-gov-east-1.api.aws" + "url": "https://omics-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics-fips.us-gov-east-1.amazonaws.com" + "url": "https://omics-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://omics.us-gov-east-1.api.aws" + "url": "https://omics.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics.us-gov-east-1.amazonaws.com" + "url": "https://omics.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + 
"Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,109 +99,109 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://omics-fips.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://omics-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://omics.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": 
"us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics.us-iso-east-1.c2s.ic.gov" + "url": "https://omics.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://omics-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics-fips.us-east-1.amazonaws.com" + "url": "https://omics-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://omics.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + 
"documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://omics.us-east-1.amazonaws.com" + "url": "https://omics.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -247,13 +247,27 @@ } }, "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-isob-east-1" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -262,7 +276,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -272,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": 
true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/omics/src/main/resources/codegen-resources/service-2.json b/services/omics/src/main/resources/codegen-resources/service-2.json index 6279f1c3f23..1d25521ae1b 100644 --- a/services/omics/src/main/resources/codegen-resources/service-2.json +++ b/services/omics/src/main/resources/codegen-resources/service-2.json @@ -2625,6 +2625,13 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "CreationType":{ + "type":"string", + "enum":[ + "IMPORT", + "UPLOAD" + ] + }, "DeleteAnnotationStoreRequest":{ "type":"structure", "required":["name"], @@ -3420,6 +3427,10 @@ "statusMessage":{ "shape":"ReadSetStatusMessage", "documentation":"

The status message for a read set. It provides more detail as to why the read set has a status.

" + }, + "creationType":{ + "shape":"CreationType", + "documentation":"

The creation type of the read set.

" } } }, @@ -3990,6 +4001,10 @@ "gpus":{ "shape":"GetRunTaskResponseGpusInteger", "documentation":"

The number of Graphics Processing Units (GPU) specified in the task.

" + }, + "instanceType":{ + "shape":"TaskInstanceType", + "documentation":"

The instance type for a task.

" } } }, @@ -4597,7 +4612,7 @@ }, "nextToken":{ "shape":"ListAnnotationImportJobsRequestNextTokenString", - "documentation":"

Specify the pagination token from a previous request to retrieve the next page of results.

", + "documentation":"

Specifies the pagination token from a previous request to retrieve the next page of results.

", "location":"querystring", "locationName":"nextToken" }, @@ -4633,7 +4648,7 @@ }, "nextToken":{ "shape":"String", - "documentation":"

A pagination token that's included if more results are available.

" + "documentation":"

Specifies the pagination token from a previous request to retrieve the next page of results.

" } } }, @@ -5824,6 +5839,10 @@ "generatedFrom":{ "shape":"GeneratedFrom", "documentation":"

Where the source originated.

" + }, + "creationType":{ + "shape":"CreationType", + "documentation":"

The creation type of the read set.

" } }, "documentation":"

A filter for read sets.

" @@ -5924,6 +5943,10 @@ "statusMessage":{ "shape":"ReadSetStatusMessage", "documentation":"

The status for a read set. It provides more detail as to why the read set has a status.

" + }, + "creationType":{ + "shape":"CreationType", + "documentation":"

The creation type of the read set.

" } }, "documentation":"

A read set.

" @@ -7441,6 +7464,10 @@ "min":1, "pattern":"[0-9]+" }, + "TaskInstanceType":{ + "type":"string", + "pattern":"[\\p{L}||\\p{M}||\\p{Z}||\\p{S}||\\p{N}||\\p{P}]+" + }, "TaskList":{ "type":"list", "member":{"shape":"TaskListItem"} @@ -7483,6 +7510,10 @@ "gpus":{ "shape":"TaskListItemGpusInteger", "documentation":"

The number of Graphics Processing Units (GPU) specified for the task.

" + }, + "instanceType":{ + "shape":"TaskInstanceType", + "documentation":"

The instance type for a task.

" } }, "documentation":"

A workflow run task.

" @@ -8267,5 +8298,5 @@ "min":1 } }, - "documentation":"

This is the Amazon Omics API Reference. For an introduction to the service, see What is Amazon Omics? in the Amazon Omics User Guide.

" + "documentation":"

This is the AWS HealthOmics API Reference. For an introduction to the service, see What is AWS HealthOmics? in the AWS HealthOmics User Guide.

" } diff --git a/services/opensearch/pom.xml b/services/opensearch/pom.xml index dc8674a092c..b1436890b8b 100644 --- a/services/opensearch/pom.xml +++ b/services/opensearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT opensearch AWS Java SDK :: Services :: Open Search diff --git a/services/opensearchserverless/pom.xml b/services/opensearchserverless/pom.xml index 11773d6f51f..4151ba0b6d0 100644 --- a/services/opensearchserverless/pom.xml +++ b/services/opensearchserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT opensearchserverless AWS Java SDK :: Services :: Open Search Serverless diff --git a/services/opensearchserverless/src/main/resources/codegen-resources/service-2.json b/services/opensearchserverless/src/main/resources/codegen-resources/service-2.json index 5e39befc357..506aa77f329 100644 --- a/services/opensearchserverless/src/main/resources/codegen-resources/service-2.json +++ b/services/opensearchserverless/src/main/resources/codegen-resources/service-2.json @@ -821,7 +821,8 @@ "type":"string", "enum":[ "SEARCH", - "TIMESERIES" + "TIMESERIES", + "VECTORSEARCH" ] }, "ConfigDescription":{ @@ -1627,7 +1628,7 @@ "documentation":"

Description of the error.

" } }, - "documentation":"

OCU Limit Exceeded for service limits

", + "documentation":"

Thrown when the collection you're attempting to create results in a number of search or indexing OCUs that exceeds the account limit.

", "exception":true }, "PolicyDescription":{ @@ -2435,7 +2436,7 @@ }, "samlMetadata":{ "type":"string", - "max":20480, + "max":51200, "min":1, "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u00A1-\\u00FF]+" }, diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index 1a06c1c3eb3..87bcec4cbbc 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index 4d0b98ae783..081f6a9cee4 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 846360f12c8..48ff8ed2127 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/osis/pom.xml b/services/osis/pom.xml index ed56472aaae..3af8f6c3c96 100644 --- a/services/osis/pom.xml +++ b/services/osis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT osis AWS Java SDK :: Services :: OSIS diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index 78f2f73f4ae..f563efb19f7 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/panorama/pom.xml b/services/panorama/pom.xml index 5a89cdd2cc5..2304aca3425 100644 --- a/services/panorama/pom.xml +++ b/services/panorama/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT panorama AWS Java SDK :: Services :: Panorama diff --git a/services/paymentcryptography/pom.xml b/services/paymentcryptography/pom.xml index 017976a017a..878f5ee6eba 100644 --- a/services/paymentcryptography/pom.xml +++ b/services/paymentcryptography/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT paymentcryptography AWS Java SDK :: Services :: Payment Cryptography diff --git a/services/paymentcryptographydata/pom.xml b/services/paymentcryptographydata/pom.xml index 99ba1fc00b5..823ab5684fd 100644 --- a/services/paymentcryptographydata/pom.xml +++ b/services/paymentcryptographydata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT paymentcryptographydata AWS Java SDK :: Services :: Payment Cryptography Data diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 161a1af54e7..230c5c40081 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index 619116db960..a3ce68c90e8 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index d7d509d7f42..3377e1fc068 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/pi/pom.xml b/services/pi/pom.xml index 8af3d028dfa..b2d591c3f86 
100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index df7445751a4..dc85d2167d2 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpoint/src/main/resources/codegen-resources/service-2.json b/services/pinpoint/src/main/resources/codegen-resources/service-2.json index a68d749ee5e..d37440fc337 100644 --- a/services/pinpoint/src/main/resources/codegen-resources/service-2.json +++ b/services/pinpoint/src/main/resources/codegen-resources/service-2.json @@ -5438,52 +5438,52 @@ } ], "documentation": "

Updates an existing message template for messages that are sent through the voice channel.

" - }, - "VerifyOTPMessage": { - "name": "VerifyOTPMessage", - "http": { - "method": "POST", - "requestUri": "/v1/apps/{application-id}/verify-otp", - "responseCode": 200 - }, - "input": { - "shape": "VerifyOTPMessageRequest" - }, - "output": { - "shape": "VerifyOTPMessageResponse", - "documentation": "

200 response

" - }, - "errors": [ - { - "shape": "BadRequestException", - "documentation": "

400 response

" - }, - { - "shape": "InternalServerErrorException", - "documentation": "

500 response

" - }, - { - "shape": "PayloadTooLargeException", - "documentation": "

413 response

" - }, - { - "shape": "ForbiddenException", - "documentation": "

403 response

" - }, - { - "shape": "NotFoundException", - "documentation": "

404 response

" - }, - { - "shape": "MethodNotAllowedException", - "documentation": "

405 response

" - }, - { - "shape": "TooManyRequestsException", - "documentation": "

429 response

" - } - ], - "documentation": "

Verify an OTP

" + }, + "VerifyOTPMessage": { + "name": "VerifyOTPMessage", + "http": { + "method": "POST", + "requestUri": "/v1/apps/{application-id}/verify-otp", + "responseCode": 200 + }, + "input": { + "shape": "VerifyOTPMessageRequest" + }, + "output": { + "shape": "VerifyOTPMessageResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "

400 response

" + }, + { + "shape": "InternalServerErrorException", + "documentation": "

500 response

" + }, + { + "shape": "PayloadTooLargeException", + "documentation": "

413 response

" + }, + { + "shape": "ForbiddenException", + "documentation": "

403 response

" + }, + { + "shape": "NotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "MethodNotAllowedException", + "documentation": "

405 response

" + }, + { + "shape": "TooManyRequestsException", + "documentation": "

429 response

" + } + ], + "documentation": "

Verify an OTP

" } }, "shapes": { @@ -6411,6 +6411,24 @@ "Name" ] }, + "ApplicationSettingsJourneyLimits": { + "type": "structure", + "members": { + "DailyCap": { + "shape": "__integer", + "documentation": "

The daily number of messages that an endpoint can receive from all journeys. The maximum value is 100. If set to 0, this limit will not apply.

" + }, + "TimeframeCap": { + "shape": "JourneyTimeframeCap", + "documentation": "

The default maximum number of messages that can be sent to an endpoint during the specified timeframe for all journeys.

" + }, + "TotalCap": { + "shape": "__integer", + "documentation": "

The default maximum number of messages that a single journey can send to a single endpoint. The maximum value is 100. If set to 0, this limit will not apply.

" + } + }, + "documentation": "

The default sending limits for journeys in the application. To override these limits and define custom limits for a specific journey, use the Journey resource.

" + }, "ApplicationSettingsResource": { "type": "structure", "members": { @@ -6433,6 +6451,10 @@ "QuietTime": { "shape": "QuietTime", "documentation": "

The default quiet time for campaigns in the application. Quiet time is a specific time range when messages aren't sent to endpoints, if all the following conditions are met:

  • The EndpointDemographic.Timezone property of the endpoint is set to a valid value.

  • The current time in the endpoint's time zone is later than or equal to the time specified by the QuietTime.Start property for the application (or a campaign or journey that has custom quiet time settings).

  • The current time in the endpoint's time zone is earlier than or equal to the time specified by the QuietTime.End property for the application (or a campaign or journey that has custom quiet time settings).

If any of the preceding conditions isn't met, the endpoint will receive messages from a campaign or journey, even if quiet time is enabled.

" + }, + "JourneyLimits": { + "shape": "ApplicationSettingsJourneyLimits", + "documentation": "

The default sending limits for journeys in the application. These limits apply to each journey for the application but can be overridden, on a per journey basis, with the JourneyLimits resource.

" } }, "documentation": "

Provides information about an application, including the default settings for an application.

", @@ -9484,15 +9506,20 @@ "shape": "__string", "documentation": "

The Web API Key, also referred to as an API_KEY or server key, that you received from Google to communicate with Google services.

" }, + "DefaultAuthenticationMethod": { + "shape": "__string", + "documentation": "

The default authentication method used for GCM. Values are either \"TOKEN\" or \"KEY\". Defaults to \"KEY\".

" + }, "Enabled": { "shape": "__boolean", "documentation": "

Specifies whether to enable the GCM channel for the application.

" + }, + "ServiceJson": { + "shape": "__string", + "documentation": "

The contents of the JSON file provided by Google during registration in order to generate an access token for authentication. For more information see Migrate from legacy FCM APIs to HTTP v1.

" } }, - "documentation": "

Specifies the status and settings of the GCM channel for an application. This channel enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service.

", - "required": [ - "ApiKey" - ] + "documentation": "

Specifies the status and settings of the GCM channel for an application. This channel enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service.

" }, "GCMChannelResponse": { "type": "structure", @@ -9509,6 +9536,10 @@ "shape": "__string", "documentation": "

The Web API Key, also referred to as an API_KEY or server key, that you received from Google to communicate with Google services.

" }, + "DefaultAuthenticationMethod": { + "shape": "__string", + "documentation": "

The default authentication method used for GCM. Values are either \"TOKEN\" or \"KEY\". Defaults to \"KEY\".

" + }, "Enabled": { "shape": "__boolean", "documentation": "

Specifies whether the GCM channel is enabled for the application.

" @@ -9517,6 +9548,10 @@ "shape": "__boolean", "documentation": "

(Not used) This property is retained only for backward compatibility.

" }, + "HasFcmServiceCredentials": { + "shape": "__boolean", + "documentation": "

Returns true if the JSON file provided by Google during registration process was used in the ServiceJson field of the request.

" + }, "Id": { "shape": "__string", "documentation": "

(Deprecated) An identifier for the GCM channel. This property is retained only for backward compatibility.

" @@ -9544,7 +9579,6 @@ }, "documentation": "

Provides information about the status and settings of the GCM channel for an application. The GCM channel enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service.

", "required": [ - "Credential", "Platform" ] }, @@ -9579,9 +9613,13 @@ "shape": "__string", "documentation": "

The URL of an image to display in the push notification.

" }, + "PreferredAuthenticationMethod": { + "shape": "__string", + "documentation": "

The preferred authentication method, with valid values \"KEY\" or \"TOKEN\". If a value isn't provided then the DefaultAuthenticationMethod is used.

" + }, "Priority": { "shape": "__string", - "documentation": "

para>normal - The notification might be delayed. Delivery is optimized for battery usage on the recipient's device. Use this value unless immediate delivery is required.

/listitem>
  • high - The notification is sent immediately and might wake a sleeping device.

  • /para>

    Amazon Pinpoint specifies this value in the FCM priority parameter when it sends the notification message to FCM.

    The equivalent values for Apple Push Notification service (APNs) are 5, for normal, and 10, for high. If you specify an APNs value for this property, Amazon Pinpoint accepts and converts the value to the corresponding FCM value.

    " + "documentation": "

    para>normal – The notification might be delayed. Delivery is optimized for battery usage on the recipient's device. Use this value unless immediate delivery is required.

    /listitem>
  • high – The notification is sent immediately and might wake a sleeping device.

  • /para>

    Amazon Pinpoint specifies this value in the FCM priority parameter when it sends the notification message to FCM.

    The equivalent values for Apple Push Notification service (APNs) are 5, for normal, and 10, for high. If you specify an APNs value for this property, Amazon Pinpoint accepts and converts the value to the corresponding FCM value.

    " }, "RawContent": { "shape": "__string", @@ -12074,8 +12112,16 @@ "documentation": "

    The maximum number of messages that the journey can send each second.

    " }, "EndpointReentryInterval": { - "shape": "__string", - "documentation": "

    Minimum time that must pass before an endpoint can re-enter a given journey. The duration should use an ISO 8601 format, such as PT1H.

    " + "shape": "__string", + "documentation": "

    Minimum time that must pass before an endpoint can re-enter a given journey. The duration should use an ISO 8601 format, such as PT1H.

    " + }, + "TimeframeCap": { + "shape": "JourneyTimeframeCap", + "documentation": "

    The number of messages that an endpoint can receive during the specified timeframe.

    " + }, + "TotalCap": { + "shape": "__integer", + "documentation": "

    The maximum number of messages a journey can send to a single endpoint. The maximum value is 100. If set to 0, this limit will not apply.

    " } }, "documentation": "

    Specifies limits on the messages that a journey can send and the number of times participants can enter a journey.

    " @@ -12092,16 +12138,16 @@ }, "JourneyChannelSettings": { "type": "structure", - "members": { - "ConnectCampaignArn": { - "shape": "__string", - "documentation": "

    Amazon Resource Name (ARN) of the Connect Campaign.

    " - }, - "ConnectCampaignExecutionRoleArn": { - "shape": "__string", - "documentation": "

    IAM role ARN to be assumed when invoking Connect campaign execution APIs for dialing.

    " - } + "members": { + "ConnectCampaignArn": { + "shape": "__string", + "documentation": "

    Amazon Resource Name (ARN) of the Connect Campaign.

    " }, + "ConnectCampaignExecutionRoleArn": { + "shape": "__string", + "documentation": "

    IAM role ARN to be assumed when invoking Connect campaign execution APIs for dialing.

    " + } + }, "documentation": "

    The channel-specific configurations for the journey.

    " }, "JourneyResponse": { @@ -12181,16 +12227,16 @@ "documentation": "

    The channel-specific configurations for the journey.

    " }, "SendingSchedule": { - "shape": "__boolean", - "documentation": "

    Indicates if journey has Advance Quiet Time enabled. This flag should be set to true in order to allow using OpenHours and ClosedDays.

    " + "shape": "__boolean", + "documentation": "

    Indicates if journey has Advance Quiet Time enabled. This flag should be set to true in order to allow using OpenHours and ClosedDays.

    " }, "OpenHours": { - "shape": "OpenHours", - "documentation": "

    The time when a journey can send messages. QuietTime should be configured first and SendingSchedule should be set to true.

    " + "shape": "OpenHours", + "documentation": "

    The time when a journey can send messages. QuietTime should be configured first and SendingSchedule should be set to true.

    " }, "ClosedDays": { - "shape": "ClosedDays", - "documentation": "

    The time when a journey will not send messages. QuietTime should be configured first and SendingSchedule should be set to true.

    " + "shape": "ClosedDays", + "documentation": "

    The time when a journey will not send messages. QuietTime should be configured first and SendingSchedule should be set to true.

    " }, "TimezoneEstimationMethods": { "shape": "ListOf__TimezoneEstimationMethodsElement", @@ -12388,6 +12434,20 @@ }, "documentation": "

    Changes the status of a journey.

    " }, + "JourneyTimeframeCap": { + "type": "structure", + "members": { + "Cap": { + "shape": "__integer", + "documentation": "

    The maximum number of messages that all journeys can send to an endpoint during the specified timeframe. The maximum value is 100. If set to 0, this limit will not apply.

    " + }, + "Days": { + "shape": "__integer", + "documentation": "

    The length of the timeframe in days. The maximum value is 30. If set to 0, this limit will not apply.

    " + } + }, + "documentation": "

    The number of messages that can be sent to an endpoint during the specified timeframe for all journeys.

    " + }, "JourneysResponse": { "type": "structure", "members": { @@ -14436,6 +14496,10 @@ "VoiceTemplate": { "shape": "Template", "documentation": "

    The voice template to use for the message. This object isn't supported for campaigns.

    " + }, + "InAppTemplate": { + "shape": "Template", + "documentation": "

    The InApp template to use for the message. The InApp template object is not supported for SendMessages.

    " } }, "documentation": "

    Specifies the message template to use for the message, for each type of channel.

    " @@ -14492,7 +14556,7 @@ }, "TemplateType": { "shape": "TemplateType", - "documentation": "

    The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, and VOICE.

    " + "documentation": "

    The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, INAPP, and VOICE.

    " }, "Version": { "shape": "__string", @@ -14542,7 +14606,7 @@ }, "TemplateType": { "shape": "__string", - "documentation": "

    The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, and VOICE.

    " + "documentation": "

    The type of channel that the message template is designed for. Possible values are: EMAIL, PUSH, SMS, INAPP, and VOICE.

    " }, "Version": { "shape": "__string", @@ -15884,6 +15948,10 @@ "QuietTime": { "shape": "QuietTime", "documentation": "

    The default quiet time for campaigns in the application. Quiet time is a specific time range when messages aren't sent to endpoints, if all the following conditions are met:

    • The EndpointDemographic.Timezone property of the endpoint is set to a valid value.

    • The current time in the endpoint's time zone is later than or equal to the time specified by the QuietTime.Start property for the application (or a campaign or journey that has custom quiet time settings).

    • The current time in the endpoint's time zone is earlier than or equal to the time specified by the QuietTime.End property for the application (or a campaign or journey that has custom quiet time settings).

    If any of the preceding conditions isn't met, the endpoint will receive messages from a campaign or journey, even if quiet time is enabled.

    To override the default quiet time settings for a specific campaign or journey, use the Campaign resource or the Journey resource to define a custom quiet time for the campaign or journey.

    " + }, + "JourneyLimits": { + "shape": "ApplicationSettingsJourneyLimits", + "documentation": "

    The default sending limits for journeys in the application. These limits apply to each journey for the application but can be overridden, on a per journey basis, with the JourneyLimits resource.

    " } }, "documentation": "

    Specifies the default settings for an application.

    " @@ -16033,28 +16101,28 @@ "documentation": "

    The status of the journey. Valid values are:

    • DRAFT - Saves the journey and doesn't publish it.

    • ACTIVE - Saves and publishes the journey. Depending on the journey's schedule, the journey starts running immediately or at the scheduled start time. If a journey's status is ACTIVE, you can't add, change, or remove activities from it.

    PAUSED, CANCELLED, COMPLETED, and CLOSED states are not supported in requests to create or update a journey. To cancel, pause, or resume a journey, use the Journey State resource.

    " }, "WaitForQuietTime": { - "shape": "__boolean", - "documentation": "

    Specifies whether endpoints in quiet hours should enter a wait till the end of their quiet hours.

    " + "shape": "__boolean", + "documentation": "

    Specifies whether endpoints in quiet hours should enter a wait till the end of their quiet hours.

    " }, "RefreshOnSegmentUpdate": { - "shape": "__boolean", - "documentation": "

    Indicates whether the journey participants should be refreshed when a segment is updated.

    " + "shape": "__boolean", + "documentation": "

    Indicates whether the journey participants should be refreshed when a segment is updated.

    " }, "JourneyChannelSettings": { - "shape": "JourneyChannelSettings", - "documentation": "

    The channel-specific configurations for the journey.

    " + "shape": "JourneyChannelSettings", + "documentation": "

    The channel-specific configurations for the journey.

    " }, "SendingSchedule": { - "shape": "__boolean", - "documentation": "

    Indicates if journey has Advance Quiet Time enabled. This flag should be set to true in order to allow using OpenHours and ClosedDays.

    " + "shape": "__boolean", + "documentation": "

    Indicates if journey has Advance Quiet Time enabled. This flag should be set to true in order to allow using OpenHours and ClosedDays.

    " }, "OpenHours": { - "shape": "OpenHours", - "documentation": "

    The time when journey allow to send messages. QuietTime should be configured first and SendingSchedule should be set to true.

    " + "shape": "OpenHours", + "documentation": "

    The time when the journey is allowed to send messages. QuietTime should be configured first and SendingSchedule should be set to true.

    " }, "ClosedDays": { - "shape": "ClosedDays", - "documentation": "

    The time when journey will stop sending messages. QuietTime should be configured first and SendingSchedule should be set to true.

    " + "shape": "ClosedDays", + "documentation": "

    The time when the journey will stop sending messages. QuietTime should be configured first and SendingSchedule should be set to true.

    " }, "TimezoneEstimationMethods": { "shape": "ListOf__TimezoneEstimationMethodsElement", @@ -16496,124 +16564,124 @@ "timestampFormat": "unixTimestamp" }, "DayOfWeek": { - "type": "string", - "enum": [ - "MONDAY", - "TUESDAY", - "WEDNESDAY", - "THURSDAY", - "FRIDAY", - "SATURDAY", - "SUNDAY" - ] + "type": "string", + "enum": [ + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ] }, "OpenHoursRule": { - "type": "structure", - "documentation": "

    Specifies the start and end time for OpenHours.

    ", - "members": { - "StartTime": { - "shape": "__string", - "documentation": "

    The start of the scheduled time, in ISO 8601 format, when the channel can send messages.

    " - }, - "EndTime": { - "shape": "__string", - "documentation": "

    The end of the scheduled time, in ISO 8601 format, when the channel can't send messages.

    " - } + "type": "structure", + "documentation": "

    Specifies the start and end time for OpenHours.

    ", + "members": { + "StartTime": { + "shape": "__string", + "documentation": "

    The start of the scheduled time, in ISO 8601 format, when the channel can send messages.

    " + }, + "EndTime": { + "shape": "__string", + "documentation": "

    The end of the scheduled time, in ISO 8601 format, when the channel can't send messages.

    " + } } }, "ListOfOpenHoursRules": { - "type": "list", - "member": { - "shape": "OpenHoursRule", - "documentation": "

    Open Hour Rule Details.

    " - } + "type": "list", + "member": { + "shape": "OpenHoursRule", + "documentation": "

    Open Hour Rule Details.

    " + } }, "MapOfListOfOpenHoursRules": { - "type": "map", - "key": { - "shape": "DayOfWeek", - "documentation": "

    Day of a week when the rule will be applied. Valid values are [MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY]

    " - }, - "value": { - "shape": "ListOfOpenHoursRules", - "documentation": "

    Open Hour Rules.

    " - } + "type": "map", + "key": { + "shape": "DayOfWeek", + "documentation": "

    Day of a week when the rule will be applied. Valid values are [MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY]

    " + }, + "value": { + "shape": "ListOfOpenHoursRules", + "documentation": "

    Open Hour Rules.

    " + } }, "OpenHours": { - "type": "structure", - "documentation": "

    Specifies the times when message are allowed to be sent to endpoints.

    ", - "members": { - "EMAIL": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "

    Specifies the schedule settings for the email channel.

    " - }, - "SMS": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "

    Specifies the schedule settings for the SMS channel.

    " - }, - "PUSH": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "

    Specifies the schedule settings for the push channel.

    " - }, - "VOICE": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "

    Specifies the schedule settings for the voice channel.

    " - }, - "CUSTOM": { - "shape": "MapOfListOfOpenHoursRules", - "documentation": "

    Specifies the schedule settings for the custom channel.

    " - } + "type": "structure", + "documentation": "

    Specifies the times when messages are allowed to be sent to endpoints.

    ", + "members": { + "EMAIL": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "

    Specifies the schedule settings for the email channel.

    " + }, + "SMS": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "

    Specifies the schedule settings for the SMS channel.

    " + }, + "PUSH": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "

    Specifies the schedule settings for the push channel.

    " + }, + "VOICE": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "

    Specifies the schedule settings for the voice channel.

    " + }, + "CUSTOM": { + "shape": "MapOfListOfOpenHoursRules", + "documentation": "

    Specifies the schedule settings for the custom channel.

    " + } } }, "ClosedDaysRule": { - "type": "structure", - "documentation": "

    Specifies the rule settings for when messages can't be sent.

    ", - "members": { - "Name": { - "shape": "__string", - "documentation": "

    The name of the closed day rule.

    " - }, - "StartDateTime": { - "shape": "__string", - "documentation": "

    Start DateTime ISO 8601 format

    " - }, - "EndDateTime": { - "shape": "__string", - "documentation": "

    End DateTime ISO 8601 format

    " - } + "type": "structure", + "documentation": "

    Specifies the rule settings for when messages can't be sent.

    ", + "members": { + "Name": { + "shape": "__string", + "documentation": "

    The name of the closed day rule.

    " + }, + "StartDateTime": { + "shape": "__string", + "documentation": "

    Start DateTime ISO 8601 format

    " + }, + "EndDateTime": { + "shape": "__string", + "documentation": "

    End DateTime ISO 8601 format

    " + } } }, "ListOfClosedDaysRules": { - "type": "list", - "member": { - "shape": "ClosedDaysRule", - "documentation": "

    ClosedDays rule details.

    " - } + "type": "list", + "member": { + "shape": "ClosedDaysRule", + "documentation": "

    ClosedDays rule details.

    " + } }, "ClosedDays": { - "type": "structure", - "documentation": "

    The time when a journey will not send messages. QuietTime should be configured first and SendingSchedule should be set to true.

    ", - "members": { - "EMAIL": { - "shape": "ListOfClosedDaysRules", - "documentation": "

    Rules for the Email channel.

    " - }, - "SMS": { - "shape": "ListOfClosedDaysRules", - "documentation": "

    Rules for the SMS channel.

    " - }, - "PUSH": { - "shape": "ListOfClosedDaysRules", - "documentation": "

    Rules for the Push channel.

    " - }, - "VOICE": { - "shape": "ListOfClosedDaysRules", - "documentation": "

    Rules for the Voice channel.

    " - }, - "CUSTOM": { - "shape": "ListOfClosedDaysRules", - "documentation": "

    Rules for the Custom channel.

    " - } + "type": "structure", + "documentation": "

    The time when a journey will not send messages. QuietTime should be configured first and SendingSchedule should be set to true.

    ", + "members": { + "EMAIL": { + "shape": "ListOfClosedDaysRules", + "documentation": "

    Rules for the Email channel.

    " + }, + "SMS": { + "shape": "ListOfClosedDaysRules", + "documentation": "

    Rules for the SMS channel.

    " + }, + "PUSH": { + "shape": "ListOfClosedDaysRules", + "documentation": "

    Rules for the Push channel.

    " + }, + "VOICE": { + "shape": "ListOfClosedDaysRules", + "documentation": "

    Rules for the Voice channel.

    " + }, + "CUSTOM": { + "shape": "ListOfClosedDaysRules", + "documentation": "

    Rules for the Custom channel.

    " + } } } } diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index c4d959f9a7c..57efc97b350 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index 31d4745331b..1661eb0a551 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/pinpointsmsvoicev2/pom.xml b/services/pinpointsmsvoicev2/pom.xml index 9e22f756294..84ac02000bb 100644 --- a/services/pinpointsmsvoicev2/pom.xml +++ b/services/pinpointsmsvoicev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT pinpointsmsvoicev2 AWS Java SDK :: Services :: Pinpoint SMS Voice V2 diff --git a/services/pipes/pom.xml b/services/pipes/pom.xml index 4d064aee286..212cf833281 100644 --- a/services/pipes/pom.xml +++ b/services/pipes/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT pipes AWS Java SDK :: Services :: Pipes diff --git a/services/polly/pom.xml b/services/polly/pom.xml index 339ef48478b..a021f772989 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/polly/src/main/resources/codegen-resources/service-2.json b/services/polly/src/main/resources/codegen-resources/service-2.json index 0a69ffffcb1..1b35df9150e 100644 --- a/services/polly/src/main/resources/codegen-resources/service-2.json +++ b/services/polly/src/main/resources/codegen-resources/service-2.json @@ -422,7 
+422,9 @@ "yue-CN", "ar-AE", "fi-FI", - "en-IE" + "en-IE", + "nl-BE", + "fr-BE" ] }, "LanguageCodeList":{ @@ -1104,7 +1106,9 @@ "Kazuha", "Tomoko", "Niamh", - "Sofie" + "Sofie", + "Lisa", + "Isabelle" ] }, "VoiceList":{ diff --git a/services/pom.xml b/services/pom.xml index 002ca873569..3ffc799d0be 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT services AWS Java SDK :: Services @@ -371,6 +371,8 @@ verifiedpermissions appfabric medicalimaging + entityresolution + managedblockchainquery The AWS Java SDK services https://aws.amazon.com/sdkforjava diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index f1ccbac0a76..da21d3eef28 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 pricing diff --git a/services/privatenetworks/pom.xml b/services/privatenetworks/pom.xml index dce7abeb33e..b54ddebbd0b 100644 --- a/services/privatenetworks/pom.xml +++ b/services/privatenetworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT privatenetworks AWS Java SDK :: Services :: Private Networks diff --git a/services/proton/pom.xml b/services/proton/pom.xml index 6d6a8f8a79e..44b0a82984d 100644 --- a/services/proton/pom.xml +++ b/services/proton/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT proton AWS Java SDK :: Services :: Proton diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index a77c65b7960..3deef7058ea 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index 2ceb6ef40f0..04225cd2f4a 100644 --- a/services/qldbsession/pom.xml +++ 
b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index 3beb8df4dad..b92d96a71ee 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/quicksight/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/quicksight/src/main/resources/codegen-resources/endpoint-rule-set.json index 8eeea50afb8..5694f5ea82a 100644 --- a/services/quicksight/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/quicksight/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { 
"conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://quicksight-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://quicksight-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": 
"UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://quicksight-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://quicksight-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://quicksight.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": 
[], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://quicksight.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://quicksight.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://quicksight.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json index 18fac260cad..e5ce3441c3c 100644 --- a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json +++ b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json @@ -42,6 +42,30 @@ "limit_key": "MaxResults", "result_key": "DataSources" }, + "ListGroupMemberships": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "GroupMemberList" + }, + "ListGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "GroupList" + }, + "ListIAMPolicyAssignments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "IAMPolicyAssignments" + }, + "ListIAMPolicyAssignmentsForUser": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ActiveAssignments" + }, "ListIngestions": { "input_token": "NextToken", "output_token": "NextToken", @@ -89,6 +113,18 @@ 
"output_token": "NextToken", "limit_key": "MaxResults" }, + "ListUserGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "GroupList" + }, + "ListUsers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "UserList" + }, "ListVPCConnections": { "input_token": "NextToken", "output_token": "NextToken", @@ -117,6 +153,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "DataSourceSummaries" + }, + "SearchGroups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "GroupList" } } } diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json b/services/quicksight/src/main/resources/codegen-resources/service-2.json index db2b5d68fa2..adde7fcdf86 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -467,6 +467,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"PreconditionNotMetException"}, {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ], @@ -1074,6 +1076,42 @@ ], "documentation":"

    Describes read and write permissions for a dashboard.

    " }, + "DescribeDashboardSnapshotJob":{ + "name":"DescribeDashboardSnapshotJob", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/snapshot-jobs/{SnapshotJobId}" + }, + "input":{"shape":"DescribeDashboardSnapshotJobRequest"}, + "output":{"shape":"DescribeDashboardSnapshotJobResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

    Describes an existing snapshot job.

    Poll job descriptions after a job starts to know the status of the job. For information on available status codes, see JobStatus.

    " + }, + "DescribeDashboardSnapshotJobResult":{ + "name":"DescribeDashboardSnapshotJobResult", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/snapshot-jobs/{SnapshotJobId}/result" + }, + "input":{"shape":"DescribeDashboardSnapshotJobResultRequest"}, + "output":{"shape":"DescribeDashboardSnapshotJobResultResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

    Describes the result of an existing snapshot job that has finished running.

    A finished snapshot job will return a COMPLETED or FAILED status when you poll the job with a DescribeDashboardSnapshotJob API call.

    If the job has not finished running, this operation returns a message that says Dashboard Snapshot Job with id <SnapshotjobId> has not reached a terminal state..

    " + }, "DescribeDataSet":{ "name":"DescribeDataSet", "http":{ @@ -2385,6 +2423,27 @@ ], "documentation":"

    Starts an Asset Bundle import job.

    An Asset Bundle import job imports specified Amazon QuickSight assets into an Amazon QuickSight account. You can also choose to import a naming prefix and specified configuration overrides. The assets that are contained in the bundle file that you provide are used to create or update a new or existing asset in your Amazon QuickSight account. Each Amazon QuickSight account can run up to 5 import jobs concurrently.

    The API caller must have the necessary \"create\", \"describe\", and \"update\" permissions in their IAM role to access each resource type that is contained in the bundle file before the resources can be imported.

    " }, + "StartDashboardSnapshotJob":{ + "name":"StartDashboardSnapshotJob", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/snapshot-jobs" + }, + "input":{"shape":"StartDashboardSnapshotJobRequest"}, + "output":{"shape":"StartDashboardSnapshotJobResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"UnsupportedPricingPlanException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

    Starts an asynchronous job that generates a dashboard snapshot. You can request one of the following format configurations per API call.

    • 1 paginated PDF

    • 5 CSVs

    Poll job descriptions with a DescribeDashboardSnapshotJob API call. Once the job succeeds, use the DescribeDashboardSnapshotJobResult API to obtain the download URIs that the job generates.

    " + }, "TagResource":{ "name":"TagResource", "http":{ @@ -3022,10 +3081,20 @@ "AccountSubscriptionStatus":{ "shape":"String", "documentation":"

    The status of your account subscription.

    " + }, + "IAMIdentityCenterInstanceArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) for the IAM Identity Center instance.

    " } }, "documentation":"

    A structure that contains the following account information elements:

    • Your Amazon QuickSight account name.

    • The edition of Amazon QuickSight that your account is using.

    • The notification email address that is associated with the Amazon QuickSight account.

    • The authentication type of the Amazon QuickSight account.

    • The status of the Amazon QuickSight account's subscription.

    " }, + "AccountName":{ + "type":"string", + "max":62, + "min":1, + "pattern":"^(?!D-|d-)([\\da-zA-Z]+)([-]*[\\da-zA-Z])*" + }, "AccountSettings":{ "type":"structure", "members":{ @@ -3110,6 +3179,10 @@ "DateAggregationFunction":{ "shape":"DateAggregationFunction", "documentation":"

    Aggregation for date values.

    • COUNT: Aggregate by the total number of values, including duplicates.

    • DISTINCT_COUNT: Aggregate by the total number of distinct values.

    • MIN: Select the smallest date value.

    • MAX: Select the largest date value.

    " + }, + "AttributeAggregationFunction":{ + "shape":"AttributeAggregationFunction", + "documentation":"

    Aggregation for attributes.

    " } }, "documentation":"

    An aggregation function aggregates values from a dimension or measure.

    This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

    " @@ -3474,6 +3547,20 @@ }, "documentation":"

    The settings that you want to use with the Q search bar.

    " }, + "AnonymousUserSnapshotJobResult":{ + "type":"structure", + "members":{ + "FileGroups":{ + "shape":"SnapshotJobResultFileGroupList", + "documentation":"

    A list of SnapshotJobResultFileGroup objects that contain information on the files that are requested during a StartDashboardSnapshotJob API call. If the job succeeds, these objects contain the location where the snapshot artifacts are stored. If the job fails, the objects contain information about the error that caused the job to fail.

    " + } + }, + "documentation":"

    A structure that contains the file groups that are requested for the artifact generation in a StartDashboardSnapshotJob API call.

    " + }, + "AnonymousUserSnapshotJobResultList":{ + "type":"list", + "member":{"shape":"AnonymousUserSnapshotJobResult"} + }, "ArcAxisConfiguration":{ "type":"structure", "members":{ @@ -4301,6 +4388,20 @@ }, "documentation":"

    Parameters for Amazon Athena.

    " }, + "AttributeAggregationFunction":{ + "type":"structure", + "members":{ + "SimpleAttributeAggregation":{ + "shape":"SimpleAttributeAggregationFunction", + "documentation":"

    The built-in aggregation functions for attributes.

    • UNIQUE_VALUE: Returns the unique value for a field, aggregated by the dimension fields.

    " + }, + "ValueForMultipleValues":{ + "shape":"String", + "documentation":"

    Used by the UNIQUE_VALUE aggregation function. If there are multiple values for the field used by the aggregation, the value for this property will be returned instead. Defaults to '*'.

    " + } + }, + "documentation":"

    Aggregation for attributes.

    " + }, "AuroraParameters":{ "type":"structure", "required":[ @@ -4352,7 +4453,8 @@ "enum":[ "IAM_AND_QUICKSIGHT", "IAM_ONLY", - "ACTIVE_DIRECTORY" + "ACTIVE_DIRECTORY", + "IAM_IDENTITY_CENTER" ] }, "AuthorSpecifiedAggregation":{ @@ -6360,7 +6462,7 @@ "locationName":"AwsAccountId" }, "AccountName":{ - "shape":"String", + "shape":"AccountName", "documentation":"

    The name of your Amazon QuickSight account. This name is unique over all of Amazon Web Services, and it appears only when users sign in. You can't change AccountName value after the Amazon QuickSight account is created.

    " }, "NotificationEmail":{ @@ -9719,6 +9821,10 @@ "DateTimeFormat":{ "shape":"DateTimeFormat", "documentation":"

    Customize how dates are formatted in controls.

    " + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"

    The configuration of info icon label options.

    " } }, "documentation":"

    The display options of a control.

    " @@ -9915,7 +10021,12 @@ "MIN", "COUNT", "DISTINCT_COUNT", - "AVERAGE" + "AVERAGE", + "MEDIAN", + "STDEV", + "STDEVP", + "VAR", + "VARP" ] }, "DefaultFormatting":{ @@ -11641,6 +11752,149 @@ } } }, + "DescribeDashboardSnapshotJobRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "SnapshotJobId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

    The ID of the Amazon Web Services account that the dashboard snapshot job is executed in.

    ", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    The ID of the dashboard that you have started a snapshot job for.

    ", + "location":"uri", + "locationName":"DashboardId" + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    The ID of the job to be described. The job ID is set when you start a new job with a StartDashboardSnapshotJob API call.

    ", + "location":"uri", + "locationName":"SnapshotJobId" + } + } + }, + "DescribeDashboardSnapshotJobResponse":{ + "type":"structure", + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

    The ID of the Amazon Web Services account that the dashboard snapshot job is executed in.

    " + }, + "DashboardId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    The ID of the dashboard that you have started a snapshot job for.

    " + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    The ID of the job to be described. The job ID is set when you start a new job with a StartDashboardSnapshotJob API call.

    " + }, + "UserConfiguration":{ + "shape":"SnapshotUserConfigurationRedacted", + "documentation":"

    The user configuration for the snapshot job. This information is provided when you make a StartDashboardSnapshotJob API call.

    " + }, + "SnapshotConfiguration":{ + "shape":"SnapshotConfiguration", + "documentation":"

    The snapshot configuration of the job. This information is provided when you make a StartDashboardSnapshotJob API call.

    " + }, + "Arn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the snapshot job. The job ARN is generated when you start a new job with a StartDashboardSnapshotJob API call.

    " + }, + "JobStatus":{ + "shape":"SnapshotJobStatus", + "documentation":"

    Indicates the status of a job. The status updates as the job executes. This shows one of the following values.

    • COMPLETED - The job was completed successfully.

    • FAILED - The job failed to execute.

    • QUEUED - The job is queued and hasn't started yet.

    • RUNNING - The job is still running.

    " + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

    The time that the snapshot job was created.

    " + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

    The time that the snapshot job status was last updated.

    " + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon Web Services request ID for this operation.

    " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

    The HTTP status of the request

    " + } + } + }, + "DescribeDashboardSnapshotJobResultRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "SnapshotJobId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

    The ID of the Amazon Web Services account that the dashboard snapshot job is executed in.

    ", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    The ID of the dashboard that you have started a snapshot job for.

    ", + "location":"uri", + "locationName":"DashboardId" + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    The ID of the job to be described. The job ID is set when you start a new job with a StartDashboardSnapshotJob API call.

    ", + "location":"uri", + "locationName":"SnapshotJobId" + } + } + }, + "DescribeDashboardSnapshotJobResultResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the snapshot job. The job ARN is generated when you start a new job with a StartDashboardSnapshotJob API call.

    " + }, + "JobStatus":{ + "shape":"SnapshotJobStatus", + "documentation":"

    Indicates the status of a job after it has reached a terminal state. A finished snapshot job will return a COMPLETED or FAILED status.

    " + }, + "CreatedTime":{ + "shape":"Timestamp", + "documentation":"

    The time that a snapshot job was created.

    " + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

    The time that a snapshot job status was last updated.

    " + }, + "Result":{ + "shape":"SnapshotJobResult", + "documentation":"

    The result of the snapshot job. Jobs that have successfully completed will return the S3Uri where they are located. Jobs that have failed will return information on the error that caused the job to fail.

    " + }, + "ErrorInfo":{ + "shape":"SnapshotJobErrorInfo", + "documentation":"

    Displays information for the error that caused a job to fail.

    " + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon Web Services request ID for this operation.

    " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

    The HTTP status of the request

    ", + "location":"statusCode" + } + } + }, "DescribeDataSetPermissionsRequest":{ "type":"structure", "required":[ @@ -13133,6 +13387,10 @@ "TitleOptions":{ "shape":"LabelOptions", "documentation":"

    The options to configure the title visibility, name, and font size.

    " + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"

    The configuration of info icon label options.

    " } }, "documentation":"

    The display options of a control.

    " @@ -13424,7 +13682,7 @@ "FieldOrderList":{ "type":"list", "member":{"shape":"FieldId"}, - "max":100 + "max":200 }, "FieldSeriesItem":{ "type":"structure", @@ -16138,7 +16396,8 @@ "type":"string", "enum":[ "IAM", - "QUICKSIGHT" + "QUICKSIGHT", + "IAM_IDENTITY_CENTER" ] }, "IdentityTypeNotSupportedException":{ @@ -17438,6 +17697,10 @@ "TitleOptions":{ "shape":"LabelOptions", "documentation":"

    The options to configure the title visibility, name, and font size.

    " + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"

    The configuration of info icon label options.

    " } }, "documentation":"

    The display options of a control.

    " @@ -20927,6 +21190,18 @@ "CollapsedRowDimensionsVisibility":{ "shape":"Visibility", "documentation":"

    The visibility setting of a pivot table's collapsed row dimension fields. If the value of this structure is HIDDEN, all collapsed columns in a pivot table are automatically hidden. The default value is VISIBLE.

    " + }, + "RowsLayout":{ + "shape":"PivotTableRowsLayout", + "documentation":"

    The layout for the row dimension headers of a pivot table. Choose one of the following options.

    • TABULAR: (Default) Each row field is displayed in a separate column.

    • HIERARCHY: All row fields are displayed in a single column. Indentation is used to differentiate row headers of different fields.

    " + }, + "RowsLabelOptions":{ + "shape":"PivotTableRowsLabelOptions", + "documentation":"

    The options for the label that is located above the row headers. This option is only applicable when RowsLayout is set to HIERARCHY.

    " + }, + "DefaultCellWidth":{ + "shape":"PixelLength", + "documentation":"

    The default cell width of the pivot table.

    " } }, "documentation":"

    The table options for a pivot table visual.

    " @@ -20945,6 +21220,32 @@ }, "documentation":"

    The paginated report options for a pivot table visual.

    " }, + "PivotTableRowsLabelOptions":{ + "type":"structure", + "members":{ + "Visibility":{ + "shape":"Visibility", + "documentation":"

    The visibility of the rows label.

    " + }, + "CustomLabel":{ + "shape":"PivotTableRowsLabelText", + "documentation":"

    The custom label string for the rows label.

    " + } + }, + "documentation":"

    The options for the label that is located above the row headers. This option is only applicable when RowsLayout is set to HIERARCHY.

    " + }, + "PivotTableRowsLabelText":{ + "type":"string", + "max":1024, + "min":1 + }, + "PivotTableRowsLayout":{ + "type":"string", + "enum":[ + "TABULAR", + "HIERARCHY" + ] + }, "PivotTableSortBy":{ "type":"structure", "members":{ @@ -22089,6 +22390,10 @@ "DateTimeFormat":{ "shape":"DateTimeFormat", "documentation":"

    Customize how dates are formatted in controls.

    " + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"

    The configuration of info icon label options.

    " } }, "documentation":"

    The display options of a control.

    " @@ -22382,6 +22687,10 @@ "RowAlternateColors":{ "shape":"RowAlternateColorList", "documentation":"

    Determines the list of row alternate colors.

    " + }, + "UsePrimaryBackgroundColor":{ + "shape":"WidgetStatus", + "documentation":"

    The primary background color options for alternate rows.

    " } }, "documentation":"

    Determines the row alternate color options.

    " @@ -22528,6 +22837,29 @@ "max":1024, "min":1 }, + "S3BucketConfiguration":{ + "type":"structure", + "required":[ + "BucketName", + "BucketPrefix", + "BucketRegion" + ], + "members":{ + "BucketName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of an existing Amazon S3 bucket where the generated snapshot artifacts are sent.

    " + }, + "BucketPrefix":{ + "shape":"NonEmptyString", + "documentation":"

    The prefix of the Amazon S3 bucket that the generated snapshots are stored in.

    " + }, + "BucketRegion":{ + "shape":"NonEmptyString", + "documentation":"

    The region that the Amazon S3 bucket is located in. The bucket must be located in the same region that the StartDashboardSnapshotJob API call is made.

    " + } + }, + "documentation":"

    An optional structure that contains the Amazon S3 bucket configuration that the generated snapshots are stored in. If you don't provide this information, generated snapshots are stored in the default Amazon QuickSight bucket.

    " + }, "S3Key":{ "type":"string", "max":1024, @@ -23405,6 +23737,11 @@ "type":"long", "sensitive":true }, + "SensitiveS3Uri":{ + "type":"string", + "pattern":"^(https|s3)://([^/]+)/?(.*)$", + "sensitive":true + }, "SensitiveString":{ "type":"string", "sensitive":true @@ -23496,6 +23833,12 @@ "max":128, "min":1 }, + "SessionTagKeyList":{ + "type":"list", + "member":{"shape":"SessionTagKey"}, + "max":50, + "min":1 + }, "SessionTagList":{ "type":"list", "member":{"shape":"SessionTag"}, @@ -23568,6 +23911,25 @@ "DATE_RANGE" ] }, + "SheetControlInfoIconLabelOptions":{ + "type":"structure", + "members":{ + "Visibility":{ + "shape":"Visibility", + "documentation":"

    The visibility configuration of info icon label options.

    " + }, + "InfoIconText":{ + "shape":"SheetControlInfoIconText", + "documentation":"

    The text content of info icon.

    " + } + }, + "documentation":"

    A control to display info icons for filters and parameters.

    " + }, + "SheetControlInfoIconText":{ + "type":"string", + "max":100, + "min":1 + }, "SheetControlLayout":{ "type":"structure", "required":["Configuration"], @@ -23861,6 +24223,10 @@ }, "documentation":"

    A SignupResponse object that contains a summary of a newly created account.

    " }, + "SimpleAttributeAggregationFunction":{ + "type":"string", + "enum":["UNIQUE_VALUE"] + }, "SimpleClusterMarker":{ "type":"structure", "members":{ @@ -23898,6 +24264,10 @@ "TitleOptions":{ "shape":"LabelOptions", "documentation":"

    The options to configure the title visibility, name, and font size.

    " + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"

    The configuration of info icon label options.

    " } }, "documentation":"

    The display options of a control.

    " @@ -23961,6 +24331,279 @@ }, "documentation":"

    Options that determine the layout and display options of a chart's small multiples.

    " }, + "SnapshotAnonymousUser":{ + "type":"structure", + "members":{ + "RowLevelPermissionTags":{ + "shape":"SessionTagList", + "documentation":"

    The tags to be used for row-level security (RLS). Make sure that the relevant datasets have RLS tags configured before you start a snapshot export job. You can configure the RLS tags of a dataset with a DataSet$RowLevelPermissionTagConfiguration API call.

    These are not the tags that are used for Amazon Web Services resource tagging. For more information on row level security in Amazon QuickSight, see Using Row-Level Security (RLS) with Tags in the Amazon QuickSight User Guide.

    " + } + }, + "documentation":"

    A structure that contains information on the anonymous user configuration.

    " + }, + "SnapshotAnonymousUserList":{ + "type":"list", + "member":{"shape":"SnapshotAnonymousUser"}, + "max":1, + "min":1 + }, + "SnapshotAnonymousUserRedacted":{ + "type":"structure", + "members":{ + "RowLevelPermissionTagKeys":{ + "shape":"SessionTagKeyList", + "documentation":"

    The tag keys for the RowLevelPermissionTags.

    " + } + }, + "documentation":"

    Use this structure to redact sensitive information that you provide about an anonymous user from the snapshot.

    " + }, + "SnapshotAnonymousUserRedactedList":{ + "type":"list", + "member":{"shape":"SnapshotAnonymousUserRedacted"}, + "max":1, + "min":1 + }, + "SnapshotConfiguration":{ + "type":"structure", + "required":["FileGroups"], + "members":{ + "FileGroups":{ + "shape":"SnapshotFileGroupList", + "documentation":"

    A list of SnapshotJobResultFileGroup objects that contain information about the snapshot that is generated. This list can hold a maximum of 6 FileGroup configurations.

    " + }, + "DestinationConfiguration":{ + "shape":"SnapshotDestinationConfiguration", + "documentation":"

    A structure that contains information on the Amazon S3 bucket that the generated snapshot is stored in.

    " + }, + "Parameters":{"shape":"Parameters"} + }, + "documentation":"

    Describes the configuration of the dashboard snapshot.

    " + }, + "SnapshotDestinationConfiguration":{ + "type":"structure", + "members":{ + "S3Destinations":{ + "shape":"SnapshotS3DestinationConfigurationList", + "documentation":"

    A list of SnapshotS3DestinationConfiguration objects that contain Amazon S3 destination configurations. This structure can hold a maximum of 1 S3DestinationConfiguration.

    " + } + }, + "documentation":"

    A structure that contains information on the Amazon S3 destinations of the generated snapshot.

    " + }, + "SnapshotFile":{ + "type":"structure", + "required":[ + "SheetSelections", + "FormatType" + ], + "members":{ + "SheetSelections":{ + "shape":"SnapshotFileSheetSelectionList", + "documentation":"

    A list of SnapshotFileSheetSelection objects that contain information on the dashboard sheet that is exported. These objects provide information about the snapshot artifacts that are generated during the job. This structure can hold a maximum of 5 CSV configurations or 1 configuration for PDF.

    " + }, + "FormatType":{ + "shape":"SnapshotFileFormatType", + "documentation":"

    The format of the snapshot file to be generated. You can choose between CSV or PDF.

    " + } + }, + "documentation":"

    A structure that contains the information for the snapshot that you want to generate. This information is provided by you when you start a new snapshot job.

    " + }, + "SnapshotFileFormatType":{ + "type":"string", + "enum":[ + "CSV", + "PDF" + ] + }, + "SnapshotFileGroup":{ + "type":"structure", + "members":{ + "Files":{ + "shape":"SnapshotFileList", + "documentation":"

    A list of SnapshotFile objects that contain the information on the snapshot files that need to be generated. This structure can hold 1 configuration at a time.

    " + } + }, + "documentation":"

    A structure that contains the information on the snapshot files.

    " + }, + "SnapshotFileGroupList":{ + "type":"list", + "member":{"shape":"SnapshotFileGroup"}, + "max":6, + "min":1 + }, + "SnapshotFileList":{ + "type":"list", + "member":{"shape":"SnapshotFile"}, + "max":1, + "min":1 + }, + "SnapshotFileSheetSelection":{ + "type":"structure", + "required":[ + "SheetId", + "SelectionScope" + ], + "members":{ + "SheetId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    The sheet ID of the dashboard to generate the snapshot artifact from. This value is required for CSV and PDF format types.

    " + }, + "SelectionScope":{ + "shape":"SnapshotFileSheetSelectionScope", + "documentation":"

    The selection scope of the visuals on a sheet of a dashboard that you are generating a snapshot of. You can choose one of the following options.

    • ALL_VISUALS - Selects all visuals that are on the sheet. This value is required if the snapshot is a PDF.

    • SELECTED_VISUALS - Select the visual that you want to add to the snapshot. This value is required if the snapshot is a CSV.

    " + }, + "VisualIds":{ + "shape":"SnapshotFileSheetSelectionVisualIdList", + "documentation":"

    A list of visual IDs that are located in the selected sheet. This structure supports tables and pivot tables. This structure is required if you are generating a CSV. You can add a maximum of 1 visual ID to this structure.

    " + } + }, + "documentation":"

    A structure that contains information that identifies the snapshot that needs to be generated.

    " + }, + "SnapshotFileSheetSelectionList":{ + "type":"list", + "member":{"shape":"SnapshotFileSheetSelection"}, + "max":1, + "min":1 + }, + "SnapshotFileSheetSelectionScope":{ + "type":"string", + "enum":[ + "ALL_VISUALS", + "SELECTED_VISUALS" + ] + }, + "SnapshotFileSheetSelectionVisualIdList":{ + "type":"list", + "member":{"shape":"ShortRestrictiveResourceId"}, + "max":1, + "min":1 + }, + "SnapshotJobErrorInfo":{ + "type":"structure", + "members":{ + "ErrorMessage":{ + "shape":"String", + "documentation":"

    The error message.

    " + }, + "ErrorType":{ + "shape":"String", + "documentation":"

    The error type.

    " + } + }, + "documentation":"

    An object that contains information on the error that caused the snapshot job to fail.

    " + }, + "SnapshotJobResult":{ + "type":"structure", + "members":{ + "AnonymousUsers":{ + "shape":"AnonymousUserSnapshotJobResultList", + "documentation":"

    A list of AnonymousUserSnapshotJobResult objects that contain information on anonymous users and their user configurations. This data is provided by you when you make a StartDashboardSnapshotJob API call.

    " + } + }, + "documentation":"

    An object that provides information on the result of a snapshot job. This object provides information about the job, the job status, and the location of the generated file.

    " + }, + "SnapshotJobResultErrorInfo":{ + "type":"structure", + "members":{ + "ErrorMessage":{ + "shape":"String", + "documentation":"

    The error message.

    " + }, + "ErrorType":{ + "shape":"String", + "documentation":"

    The error type.

    " + } + }, + "documentation":"

    Information on the error that caused the snapshot job to fail.

    " + }, + "SnapshotJobResultErrorInfoList":{ + "type":"list", + "member":{"shape":"SnapshotJobResultErrorInfo"} + }, + "SnapshotJobResultFileGroup":{ + "type":"structure", + "members":{ + "Files":{ + "shape":"SnapshotFileList", + "documentation":"

    A list of SnapshotFile objects.

    " + }, + "S3Results":{ + "shape":"SnapshotJobS3ResultList", + "documentation":"

    A list of SnapshotJobS3Result objects.

    " + } + }, + "documentation":"

    A structure that contains information on the generated snapshot file groups.

    " + }, + "SnapshotJobResultFileGroupList":{ + "type":"list", + "member":{"shape":"SnapshotJobResultFileGroup"} + }, + "SnapshotJobS3Result":{ + "type":"structure", + "members":{ + "S3DestinationConfiguration":{ + "shape":"SnapshotS3DestinationConfiguration", + "documentation":"

    A list of Amazon S3 bucket configurations that are provided when you make a StartDashboardSnapshotJob API call.

    " + }, + "S3Uri":{ + "shape":"SensitiveS3Uri", + "documentation":"

    The Amazon S3 Uri.

    " + }, + "ErrorInfo":{ + "shape":"SnapshotJobResultErrorInfoList", + "documentation":"

    An array of error records that describe any failures that occur while the dashboard snapshot job runs.

    " + } + }, + "documentation":"

    The Amazon S3 result from the snapshot job. The result includes the DestinationConfiguration and the Amazon S3 Uri. If an error occurred during the job, the result returns information on the error.

    " + }, + "SnapshotJobS3ResultList":{ + "type":"list", + "member":{"shape":"SnapshotJobS3Result"} + }, + "SnapshotJobStatus":{ + "type":"string", + "enum":[ + "QUEUED", + "RUNNING", + "COMPLETED", + "FAILED" + ] + }, + "SnapshotS3DestinationConfiguration":{ + "type":"structure", + "members":{ + "BucketConfiguration":{ + "shape":"S3BucketConfiguration", + "documentation":"

    A structure that contains details about the Amazon S3 bucket that the generated dashboard snapshot is saved in.

    " + } + }, + "documentation":"

    A structure that describes the Amazon S3 settings to use to save the generated dashboard snapshot.

    " + }, + "SnapshotS3DestinationConfigurationList":{ + "type":"list", + "member":{"shape":"SnapshotS3DestinationConfiguration"}, + "max":1, + "min":1 + }, + "SnapshotUserConfiguration":{ + "type":"structure", + "members":{ + "AnonymousUsers":{ + "shape":"SnapshotAnonymousUserList", + "documentation":"

    An array of records that describe the anonymous users that the dashboard snapshot is generated for.

    " + } + }, + "documentation":"

    A structure that contains information about the users that the dashboard snapshot is generated for.

    " + }, + "SnapshotUserConfigurationRedacted":{ + "type":"structure", + "members":{ + "AnonymousUsers":{ + "shape":"SnapshotAnonymousUserRedactedList", + "documentation":"

    An array of records that describe anonymous users that the dashboard snapshot is generated for. Sensitive user information is excluded.

    " + } + }, + "documentation":"

    A structure that contains information about the users that the dashboard snapshot is generated for. Sensitive user information is excluded.

    " + }, "SnowflakeParameters":{ "type":"structure", "required":[ @@ -24195,6 +24838,64 @@ } } }, + "StartDashboardSnapshotJobRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "SnapshotJobId", + "UserConfiguration", + "SnapshotConfiguration" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

    The ID of the Amazon Web Services account that the dashboard snapshot job is executed in.

    ", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    The ID of the dashboard that you want to start a snapshot job for.

    ", + "location":"uri", + "locationName":"DashboardId" + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    An ID for the dashboard snapshot job. This ID is unique to the dashboard while the job is running. This ID can be used to poll the status of a job with a DescribeDashboardSnapshotJob while the job runs. You can reuse this ID for another job 24 hours after the current job is completed.

    " + }, + "UserConfiguration":{ + "shape":"SnapshotUserConfiguration", + "documentation":"

    A structure that contains information about the anonymous users that the generated snapshot is for. This API will not return information about registered Amazon QuickSight users.

    " + }, + "SnapshotConfiguration":{ + "shape":"SnapshotConfiguration", + "documentation":"

    A structure that describes the configuration of the dashboard snapshot.

    " + } + } + }, + "StartDashboardSnapshotJobResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) for the dashboard snapshot job.

    " + }, + "SnapshotJobId":{ + "shape":"ShortRestrictiveResourceId", + "documentation":"

    The ID of the job. The job ID is set when you start a new job with a StartDashboardSnapshotJob API call.

    " + }, + "RequestId":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon Web Services request ID for this operation.

    " + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

    The HTTP status of the request.

    ", + "location":"statusCode" + } + } + }, "StatePersistenceConfigurations":{ "type":"structure", "required":["Enabled"], @@ -24365,6 +25066,14 @@ }, "documentation":"

    The configuration that defines the default value of a String parameter when a value has not been set.

    " }, + "StyledCellType":{ + "type":"string", + "enum":[ + "TOTAL", + "METRIC_HEADER", + "VALUE" + ] + }, "SubnetId":{ "type":"string", "max":255, @@ -24407,6 +25116,10 @@ "MetricHeaderCellStyle":{ "shape":"TableCellStyle", "documentation":"

    The cell styling options for the subtotals of header cells.

    " + }, + "StyleTargets":{ + "shape":"TableStyleTargetList", + "documentation":"

    The style targets options for subtotals.

    " } }, "documentation":"

    The subtotal options.

    " @@ -24865,6 +25578,22 @@ }, "documentation":"

    The sort configuration for a TableVisual.

    " }, + "TableStyleTarget":{ + "type":"structure", + "required":["CellType"], + "members":{ + "CellType":{ + "shape":"StyledCellType", + "documentation":"

    The cell type of the table style target.

    " + } + }, + "documentation":"

    The table style target.

    " + }, + "TableStyleTargetList":{ + "type":"list", + "member":{"shape":"TableStyleTarget"}, + "max":3 + }, "TableTotalsPlacement":{ "type":"string", "enum":[ @@ -25332,6 +26061,10 @@ "PlaceholderOptions":{ "shape":"TextControlPlaceholderOptions", "documentation":"

    The configuration of the placeholder options in a text area control.

    " + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"

    The configuration of info icon label options.

    " } }, "documentation":"

    The display options of a control.

    " @@ -25374,6 +26107,10 @@ "PlaceholderOptions":{ "shape":"TextControlPlaceholderOptions", "documentation":"

    The configuration of the placeholder options in a text field control.

    " + }, + "InfoIconLabelOptions":{ + "shape":"SheetControlInfoIconLabelOptions", + "documentation":"

    The configuration of info icon label options.

    " } }, "documentation":"

    The display options of a control.

    " @@ -26085,6 +26822,10 @@ "CellValueSynonyms":{ "shape":"CellValueSynonyms", "documentation":"

    The other names or aliases for the calculated field cell value.

    " + }, + "NonAdditive":{ + "shape":"NullableBoolean", + "documentation":"

    The non additive value for the calculated field.

    " } }, "documentation":"

    A structure that represents a calculated field.

    " @@ -26160,7 +26901,7 @@ }, "Aggregation":{ "shape":"DefaultAggregation", - "documentation":"

    The type of aggregation that is performed on the column data when it's queried. Valid values for this structure are SUM, MAX, MIN, COUNT, DISTINCT_COUNT, and AVERAGE.

    " + "documentation":"

    The type of aggregation that is performed on the column data when it's queried.

    " }, "IsIncludedInTopic":{ "shape":"Boolean", @@ -26201,6 +26942,10 @@ "CellValueSynonyms":{ "shape":"CellValueSynonyms", "documentation":"

    The other names or aliases for the column cell value.

    " + }, + "NonAdditive":{ + "shape":"NullableBoolean", + "documentation":"

    The non additive value for the column.

    " } }, "documentation":"

    Represents a column in a dataset.

    " @@ -29007,7 +29752,8 @@ "enum":[ "TOP", "MIDDLE", - "BOTTOM" + "BOTTOM", + "AUTO" ] }, "Visibility":{ diff --git a/services/ram/pom.xml b/services/ram/pom.xml index 9116d25f574..9b04ef32463 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/rbin/pom.xml b/services/rbin/pom.xml index 46988aec40f..86acca2d4ce 100644 --- a/services/rbin/pom.xml +++ b/services/rbin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT rbin AWS Java SDK :: Services :: Rbin diff --git a/services/rds/pom.xml b/services/rds/pom.xml index 8cf21196b19..9ea4980a58c 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rds/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/rds/src/main/resources/codegen-resources/endpoint-rule-set.json index b9aff9f06c8..00bd15c3610 100644 --- a/services/rds/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/rds/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { 
+ "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://rds-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://rds-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + 
"headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://rds.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://rds-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://rds.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://rds-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { 
"conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://rds.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://rds.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://rds.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://rds.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index d2b0ff2172a..c50e1eb47d7 100644 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -604,7 +604,7 @@ 
{"shape":"InvalidDBClusterStateFault"}, {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

    Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.

    You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.

    This operation applies only to Aurora DB clusters.

    " + "documentation":"

    Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.

    You can create a global database that is initially empty, and then create the primary and secondary DB clusters in the global database. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.

    This operation applies only to Aurora DB clusters.

    " }, "CreateOptionGroup":{ "name":"CreateOptionGroup", @@ -638,7 +638,7 @@ {"shape":"BlueGreenDeploymentNotFoundFault"}, {"shape":"InvalidBlueGreenDeploymentStateFault"} ], - "documentation":"

    Deletes a blue/green deployment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " + "documentation":"

    Deletes a blue/green deployment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " }, "DeleteCustomDBEngineVersion":{ "name":"DeleteCustomDBEngineVersion", @@ -673,10 +673,28 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBClusterSnapshotAlreadyExistsFault"}, {"shape":"SnapshotQuotaExceededFault"}, - {"shape":"InvalidDBClusterSnapshotStateFault"} + {"shape":"InvalidDBClusterSnapshotStateFault"}, + {"shape":"DBClusterAutomatedBackupQuotaExceededFault"} ], "documentation":"

    The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

    If you're deleting a Multi-AZ DB cluster with read replicas, all cluster members are terminated and read replicas are promoted to standalone instances.

    For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

    " }, + "DeleteDBClusterAutomatedBackup":{ + "name":"DeleteDBClusterAutomatedBackup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBClusterAutomatedBackupMessage"}, + "output":{ + "shape":"DeleteDBClusterAutomatedBackupResult", + "resultWrapper":"DeleteDBClusterAutomatedBackupResult" + }, + "errors":[ + {"shape":"InvalidDBClusterAutomatedBackupStateFault"}, + {"shape":"DBClusterAutomatedBackupNotFoundFault"} + ], + "documentation":"

    Deletes automated backups using the DbClusterResourceId value of the source DB cluster or the Amazon Resource Name (ARN) of the automated backups.

    " + }, "DeleteDBClusterEndpoint":{ "name":"DeleteDBClusterEndpoint", "http":{ @@ -947,7 +965,7 @@ "errors":[ {"shape":"BlueGreenDeploymentNotFoundFault"} ], - "documentation":"

    Returns information about blue/green deployments.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " + "documentation":"

    Describes one or more blue/green deployments.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " }, "DescribeCertificates":{ "name":"DescribeCertificates", @@ -965,6 +983,22 @@ ], "documentation":"

    Lists the set of CA certificates provided by Amazon RDS for this Amazon Web Services account.

    For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

    " }, + "DescribeDBClusterAutomatedBackups":{ + "name":"DescribeDBClusterAutomatedBackups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBClusterAutomatedBackupsMessage"}, + "output":{ + "shape":"DBClusterAutomatedBackupMessage", + "resultWrapper":"DescribeDBClusterAutomatedBackupsResult" + }, + "errors":[ + {"shape":"DBClusterAutomatedBackupNotFoundFault"} + ], + "documentation":"

    Displays backups for both current and deleted DB clusters. For example, use this operation to find details about automated backups for previously deleted clusters. Current clusters are returned for both the DescribeDBClusterAutomatedBackups and DescribeDBClusters operations.

    All parameters are optional.

    " + }, "DescribeDBClusterBacktracks":{ "name":"DescribeDBClusterBacktracks", "http":{ @@ -1952,7 +1986,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"InvalidDBInstanceStateFault"} ], - "documentation":"

    Modify a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    This action only applies to Aurora DB clusters.

    " + "documentation":"

    Modifies a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    This operation only applies to Aurora global database clusters.

    " }, "ModifyOptionGroup":{ "name":"ModifyOptionGroup", @@ -2290,7 +2324,8 @@ {"shape":"OptionGroupNotFoundFault"}, {"shape":"StorageQuotaExceededFault"}, {"shape":"DomainNotFoundFault"}, - {"shape":"DBClusterParameterGroupNotFoundFault"} + {"shape":"DBClusterParameterGroupNotFoundFault"}, + {"shape":"DBClusterAutomatedBackupNotFoundFault"} ], "documentation":"

    Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

    For Aurora, this action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

    For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

    " }, @@ -2620,7 +2655,7 @@ {"shape":"BlueGreenDeploymentNotFoundFault"}, {"shape":"InvalidBlueGreenDeploymentStateFault"} ], - "documentation":"

    Switches over a blue/green deployment.

    Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " + "documentation":"

    Switches over a blue/green deployment.

    Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " }, "SwitchoverReadReplica":{ "name":"SwitchoverReadReplica", @@ -3021,7 +3056,7 @@ "members":{ "BlueGreenDeploymentIdentifier":{ "shape":"BlueGreenDeploymentIdentifier", - "documentation":"

    The system-generated identifier of the blue/green deployment.

    " + "documentation":"

    The unique identifier of the blue/green deployment.

    " }, "BlueGreenDeploymentName":{ "shape":"BlueGreenDeploymentName", @@ -3045,7 +3080,7 @@ }, "Status":{ "shape":"BlueGreenDeploymentStatus", - "documentation":"

    The status of the blue/green deployment.

    Values:

    • PROVISIONING - Resources are being created in the green environment.

    • AVAILABLE - Resources are available in the green environment.

    • SWITCHOVER_IN_PROGRESS - The deployment is being switched from the blue environment to the green environment.

    • SWITCHOVER_COMPLETED - Switchover from the blue environment to the green environment is complete.

    • INVALID_CONFIGURATION - Resources in the green environment are invalid, so switchover isn't possible.

    • SWITCHOVER_FAILED - Switchover was attempted but failed.

    • DELETING - The blue/green deployment is being deleted.

    " + "documentation":"

    The status of the blue/green deployment.

    Valid Values:

    • PROVISIONING - Resources are being created in the green environment.

    • AVAILABLE - Resources are available in the green environment.

    • SWITCHOVER_IN_PROGRESS - The deployment is being switched from the blue environment to the green environment.

    • SWITCHOVER_COMPLETED - Switchover from the blue environment to the green environment is complete.

    • INVALID_CONFIGURATION - Resources in the green environment are invalid, so switchover isn't possible.

    • SWITCHOVER_FAILED - Switchover was attempted but failed.

    • DELETING - The blue/green deployment is being deleted.

    " }, "StatusDetails":{ "shape":"BlueGreenDeploymentStatusDetails", @@ -3053,15 +3088,15 @@ }, "CreateTime":{ "shape":"TStamp", - "documentation":"

    Specifies the time when the blue/green deployment was created, in Universal Coordinated Time (UTC).

    " + "documentation":"

    The time when the blue/green deployment was created, in Universal Coordinated Time (UTC).

    " }, "DeleteTime":{ "shape":"TStamp", - "documentation":"

    Specifies the time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).

    " + "documentation":"

    The time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).

    " }, "TagList":{"shape":"TagList"} }, - "documentation":"

    Contains the details about a blue/green deployment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " + "documentation":"

    Details about a blue/green deployment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " }, "BlueGreenDeploymentAlreadyExistsFault":{ "type":"structure", @@ -3114,10 +3149,10 @@ }, "Status":{ "shape":"BlueGreenDeploymentTaskStatus", - "documentation":"

    The status of the blue/green deployment task.

    Values:

    • PENDING - The resources are being prepared for deployment.

    • IN_PROGRESS - The resource is being deployed.

    • COMPLETED - The resource has been deployed.

    • FAILED - Deployment of the resource failed.

    " + "documentation":"

    The status of the blue/green deployment task.

    Valid Values:

    • PENDING - The resource is being prepared for deployment.

    • IN_PROGRESS - The resource is being deployed.

    • COMPLETED - The resource has been deployed.

    • FAILED - Deployment of the resource failed.

    " } }, - "documentation":"

    Contains the details about a task for a blue/green deployment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " + "documentation":"

    Details about a task for a blue/green deployment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " }, "BlueGreenDeploymentTaskList":{ "type":"list", @@ -3862,6 +3897,10 @@ "MasterUserSecretKmsKeyId":{ "shape":"String", "documentation":"

    The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

    This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB cluster.

    The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

    If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

    There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

    Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

    " + }, + "EnableLocalWriteForwarding":{ + "shape":"BooleanOptional", + "documentation":"

    Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.

    Valid for: Aurora DB clusters only

    " } }, "documentation":"

    " @@ -3943,7 +3982,7 @@ "members":{ "DBName":{ "shape":"String", - "documentation":"

    The meaning of this parameter differs depending on the database engine.

    Amazon Aurora MySQL

    The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster.

    Constraints:

    • Must contain 1 to 64 alphanumeric characters.

    • Can't be a word reserved by the database engine.

    Amazon Aurora PostgreSQL

    The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created.

    Default: postgres

    Constraints:

    • Must contain 1 to 63 alphanumeric characters.

    • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).

    • Can't be a word reserved by the database engine.

    Amazon RDS Custom for Oracle

    The Oracle System ID (SID) of the created RDS Custom DB instance.

    Default: ORCL

    Constraints:

    • Must contain 1 to 8 alphanumeric characters.

    • Must contain a letter.

    • Can't be a word reserved by the database engine.

    Amazon RDS Custom for SQL Server

    Not applicable. Must be null.

    RDS for MariaDB

    The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.

    Constraints:

    • Must contain 1 to 64 letters or numbers.

    • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

    • Can't be a word reserved by the database engine.

    RDS for MySQL

    The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance.

    Constraints:

    • Must contain 1 to 64 letters or numbers.

    • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

    • Can't be a word reserved by the database engine.

    RDS for Oracle

    The Oracle System ID (SID) of the created DB instance.

    Default: ORCL

    Constraints:

    • Can't be longer than 8 characters.

    • Can't be a word reserved by the database engine, such as the string NULL.

    RDS for PostgreSQL

    The name of the database to create when the DB instance is created.

    Default: postgres

    Constraints:

    • Must contain 1 to 63 letters, numbers, or underscores.

    • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

    • Can't be a word reserved by the database engine.

    RDS for SQL Server

    Not applicable. Must be null.

    " + "documentation":"

    The meaning of this parameter differs according to the database engine you use.

    MySQL

    The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 letters or numbers.

    • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

    • Can't be a word reserved by the specified database engine

    MariaDB

    The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

    Constraints:

    • Must contain 1 to 64 letters or numbers.

    • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

    • Can't be a word reserved by the specified database engine

    PostgreSQL

    The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance.

    Constraints:

    • Must contain 1 to 63 letters, numbers, or underscores.

    • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

    • Can't be a word reserved by the specified database engine

    Oracle

    The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName.

    Default: ORCL

    Constraints:

    • Can't be longer than 8 characters

    Amazon RDS Custom for Oracle

    The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs.

    Default: ORCL

    Constraints:

    • It must contain 1 to 8 alphanumeric characters.

    • It must contain a letter.

    • It can't be a word reserved by the database engine.

    Amazon RDS Custom for SQL Server

    Not applicable. Must be null.

    SQL Server

    Not applicable. Must be null.

    Amazon Aurora MySQL

    The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.

    Constraints:

    • It must contain 1 to 64 alphanumeric characters.

    • It can't be a word reserved by the database engine.

    Amazon Aurora PostgreSQL

    The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster.

    Constraints:

    • It must contain 1 to 63 alphanumeric characters.

    • It must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).

    • It can't be a word reserved by the database engine.

    " }, "DBInstanceIdentifier":{ "shape":"String", @@ -4176,6 +4215,10 @@ "CACertificateIdentifier":{ "shape":"String", "documentation":"

    The CA certificate identifier to use for the DB instance's server certificate.

    This setting doesn't apply to RDS Custom DB instances.

    For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

    " + }, + "DBSystemId":{ + "shape":"String", + "documentation":"

    The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. In this context, the term \"Oracle database instance\" refers exclusively to the system global area (SGA) and Oracle background processes. If you don't specify a SID, the value defaults to RDSCDB. The Oracle SID is also the name of your CDB.

    " } }, "documentation":"

    " @@ -5070,6 +5113,10 @@ "IOOptimizedNextAllowedModificationTime":{ "shape":"TStamp", "documentation":"

    The next time you can modify the DB cluster to use the aurora-iopt1 storage type.

    This setting is only for Aurora DB clusters.

    " + }, + "LocalWriteForwardingStatus":{ + "shape":"LocalWriteForwardingStatus", + "documentation":"

    Specifies whether an Aurora DB cluster has in-cluster write forwarding enabled, not enabled, requested, or is in the process of enabling it.

    " } }, "documentation":"

    Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster.

    For an Amazon Aurora DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster.

    For a Multi-AZ DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime.

    For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

    ", @@ -5087,6 +5134,146 @@ }, "exception":true }, + "DBClusterAutomatedBackup":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

    The name of the database engine for this automated backup.

    " + }, + "VpcId":{ + "shape":"String", + "documentation":"

    The VPC ID associated with the DB cluster.

    " + }, + "DBClusterAutomatedBackupsArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) for the automated backups.

    " + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

    The identifier for the source DB cluster, which can't be changed and which is unique to an Amazon Web Services Region.

    " + }, + "RestoreWindow":{"shape":"RestoreWindow"}, + "MasterUsername":{ + "shape":"String", + "documentation":"

    The master user name of the automated backup.

    " + }, + "DbClusterResourceId":{ + "shape":"String", + "documentation":"

    The resource ID for the source DB cluster, which can't be changed and which is unique to an Amazon Web Services Region.

    " + }, + "Region":{ + "shape":"String", + "documentation":"

    The Amazon Web Services Region associated with the automated backup.

    " + }, + "LicenseModel":{ + "shape":"String", + "documentation":"

    The license model information for this DB cluster automated backup.

    " + }, + "Status":{ + "shape":"String", + "documentation":"

    A list of status information for an automated backup:

    • retained - Automated backups for deleted clusters.

    " + }, + "IAMDatabaseAuthenticationEnabled":{ + "shape":"Boolean", + "documentation":"

    True if mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.

    " + }, + "ClusterCreateTime":{ + "shape":"TStamp", + "documentation":"

    The time when the DB cluster was created, in Universal Coordinated Time (UTC).

    " + }, + "StorageEncrypted":{ + "shape":"Boolean", + "documentation":"

    Specifies whether the source DB cluster is encrypted.

    " + }, + "AllocatedStorage":{ + "shape":"Integer", + "documentation":"

    For all database engines except Amazon Aurora, AllocatedStorage specifies the allocated storage size in gibibytes (GiB). For Aurora, AllocatedStorage always returns 1, because Aurora DB cluster storage size isn't fixed, but instead automatically adjusts as needed.

    " + }, + "EngineVersion":{ + "shape":"String", + "documentation":"

    The version of the database engine for the automated backup.

    " + }, + "DBClusterArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) for the source DB cluster.

    " + }, + "BackupRetentionPeriod":{ + "shape":"IntegerOptional", + "documentation":"

    The retention period for the automated backups.

    " + }, + "EngineMode":{ + "shape":"String", + "documentation":"

    The engine mode of the database engine for the automated backup.

    " + }, + "AvailabilityZones":{ + "shape":"AvailabilityZones", + "documentation":"

    The Availability Zones where instances in the DB cluster can be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones.

    " + }, + "Port":{ + "shape":"Integer", + "documentation":"

    The port number that the automated backup used for connections.

    Default: Inherits from the source DB cluster

    Valid Values: 1150-65535

    " + }, + "KmsKeyId":{ + "shape":"String", + "documentation":"

    The Amazon Web Services KMS key ID for an automated backup.

    The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.

    " + }, + "StorageType":{ + "shape":"String", + "documentation":"

    The storage type associated with the DB cluster.

    This setting is only for non-Aurora Multi-AZ DB clusters.

    " + }, + "Iops":{ + "shape":"IntegerOptional", + "documentation":"

    The IOPS (I/O operations per second) value for the automated backup.

    This setting is only for non-Aurora Multi-AZ DB clusters.

    " + } + }, + "documentation":"

    An automated backup of a DB cluster. It consists of system backups, transaction logs, and the database cluster properties that existed at the time you deleted the source cluster.

    ", + "wrapper":true + }, + "DBClusterAutomatedBackupList":{ + "type":"list", + "member":{ + "shape":"DBClusterAutomatedBackup", + "locationName":"DBClusterAutomatedBackup" + } + }, + "DBClusterAutomatedBackupMessage":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

    The pagination token provided in the previous request. If this parameter is specified, the response includes only records beyond the marker, up to MaxRecords.

    " + }, + "DBClusterAutomatedBackups":{ + "shape":"DBClusterAutomatedBackupList", + "documentation":"

    A list of DBClusterAutomatedBackup backups.

    " + } + } + }, + "DBClusterAutomatedBackupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

    No automated backup for this DB cluster was found.

    ", + "error":{ + "code":"DBClusterAutomatedBackupNotFoundFault", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBClusterAutomatedBackupQuotaExceededFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The quota for retained automated backups was exceeded. This prevents you from retaining any additional automated backups. The retained automated backups quota is the same as your DB cluster quota.

    ", + "error":{ + "code":"DBClusterAutomatedBackupQuotaExceededFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "DBClusterBacktrack":{ "type":"structure", "members":{ @@ -5597,7 +5784,7 @@ }, "DBClusterSnapshotArn":{ "shape":"String", - "documentation":"

    The Amazon Resource Name (ARN) for the DB cluster snapshot.

    " + "documentation":"

    Specifies the Amazon Resource Name (ARN) for the DB cluster snapshot.

    " }, "SourceDBClusterSnapshotArn":{ "shape":"String", @@ -5615,6 +5802,10 @@ "StorageType":{ "shape":"String", "documentation":"

    The storage type associated with the DB cluster snapshot.

    This setting is only for Aurora DB clusters.

    " + }, + "DbClusterResourceId":{ + "shape":"String", + "documentation":"

    Specifies the resource ID of the DB cluster that this DB cluster snapshot was created from.

    " } }, "documentation":"

    Contains the details for an Amazon RDS DB cluster snapshot

    This data type is used as a response element in the DescribeDBClusterSnapshots action.

    ", @@ -5824,6 +6015,10 @@ "SupportedCACertificateIdentifiers":{ "shape":"CACertificateIdentifiersList", "documentation":"

    A list of the supported CA certificate identifiers.

    For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

    " + }, + "SupportsLocalWriteForwarding":{ + "shape":"BooleanOptional", + "documentation":"

    A value that indicates whether the DB engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.

    Valid for: Aurora DB clusters only

    " } }, "documentation":"

    This data type is used as a response element in the action DescribeDBEngineVersions.

    " @@ -5878,7 +6073,7 @@ }, "DBName":{ "shape":"String", - "documentation":"

    The meaning of this parameter differs depending on the database engine.

    • For RDS for MariaDB, Microsoft SQL Server, MySQL, and PostgreSQL - The name of the initial database specified for this DB instance when it was created, if one was provided. This same name is returned for the life of the DB instance.

    • For RDS for Oracle - The Oracle System ID (SID) of the created DB instance. This value is only returned when the object returned is an Oracle DB instance.

    " + "documentation":"

    Contains the initial database name that you provided (if required) when you created the DB instance. This name is returned for the life of your DB instance. For an RDS for Oracle CDB instance, the name identifies the PDB rather than the CDB.

    " }, "Endpoint":{ "shape":"Endpoint", @@ -6172,6 +6367,10 @@ "ReadReplicaSourceDBClusterIdentifier":{ "shape":"String", "documentation":"

    The identifier of the source DB cluster if this DB instance is a read replica.

    " + }, + "PercentProgress":{ + "shape":"String", + "documentation":"

    The progress of the storage optimization operation as a percentage.

    " } }, "documentation":"

    Contains the details of an Amazon RDS DB instance.

    This data type is used as a response element in the operations CreateDBInstance, CreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance, PromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3, RestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance.

    ", @@ -6198,7 +6397,7 @@ }, "DbiResourceId":{ "shape":"String", - "documentation":"

    The identifier for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region.

    " + "documentation":"

    The resource ID for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region.

    " }, "Region":{ "shape":"String", @@ -6206,7 +6405,7 @@ }, "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

    The customer id of the instance that is/was associated with the automated backup.

    " + "documentation":"

    The identifier for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region.

    " }, "RestoreWindow":{ "shape":"RestoreWindow", @@ -6218,7 +6417,7 @@ }, "Status":{ "shape":"String", - "documentation":"

    Provides a list of status information for an automated backup:

    • active - automated backups for current instances

    • retained - automated backups for deleted instances

    • creating - automated backups that are waiting for the first automated snapshot to be available.

    " + "documentation":"

    Provides a list of status information for an automated backup:

    • active - Automated backups for current instances.

    • retained - Automated backups for deleted instances.

    • creating - Automated backups that are waiting for the first automated snapshot to be available.

    " }, "Port":{ "shape":"Integer", @@ -6238,7 +6437,7 @@ }, "MasterUsername":{ "shape":"String", - "documentation":"

    The license model of an automated backup.

    " + "documentation":"

    The master user name of an automated backup.

    " }, "Engine":{ "shape":"String", @@ -6345,7 +6544,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The quota for retained automated backups was exceeded. This prevents you from retaining any additional automated backups. The retained automated backups quota is the same as your DB Instance quota.

    ", + "documentation":"

    The quota for retained automated backups was exceeded. This prevents you from retaining any additional automated backups. The retained automated backups quota is the same as your DB instance quota.

    ", "error":{ "code":"DBInstanceAutomatedBackupQuotaExceeded", "httpStatusCode":400, @@ -7245,6 +7444,10 @@ "StorageThroughput":{ "shape":"IntegerOptional", "documentation":"

    Specifies the storage throughput for the DB snapshot.

    " + }, + "DBSystemId":{ + "shape":"String", + "documentation":"

    The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. The Oracle SID is also the name of your CDB.

    " } }, "documentation":"

    Contains the details of an Amazon RDS DB snapshot.

    This data type is used as a response element in the DescribeDBSnapshots action.

    ", @@ -7484,11 +7687,11 @@ "members":{ "BlueGreenDeploymentIdentifier":{ "shape":"BlueGreenDeploymentIdentifier", - "documentation":"

    The blue/green deployment identifier of the deployment to be deleted. This parameter isn't case-sensitive.

    Constraints:

    • Must match an existing blue/green deployment identifier.

    " + "documentation":"

    The unique identifier of the blue/green deployment to delete. This parameter isn't case-sensitive.

    Constraints:

    • Must match an existing blue/green deployment identifier.

    " }, "DeleteTarget":{ "shape":"BooleanOptional", - "documentation":"

    A value that indicates whether to delete the resources in the green environment. You can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED.

    " + "documentation":"

    Specifies whether to delete the resources in the green environment. You can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED.

    " } } }, @@ -7515,6 +7718,22 @@ } } }, + "DeleteDBClusterAutomatedBackupMessage":{ + "type":"structure", + "required":["DbClusterResourceId"], + "members":{ + "DbClusterResourceId":{ + "shape":"String", + "documentation":"

    The identifier for the source DB cluster, which can't be changed and which is unique to an Amazon Web Services Region.

    " + } + } + }, + "DeleteDBClusterAutomatedBackupResult":{ + "type":"structure", + "members":{ + "DBClusterAutomatedBackup":{"shape":"DBClusterAutomatedBackup"} + } + }, "DeleteDBClusterEndpointMessage":{ "type":"structure", "required":["DBClusterEndpointIdentifier"], @@ -7540,6 +7759,10 @@ "FinalDBSnapshotIdentifier":{ "shape":"String", "documentation":"

    The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled.

    Specifying this parameter and also skipping the creation of a final DB cluster snapshot with the SkipFinalSnapshot parameter results in an error.

    Constraints:

    • Must be 1 to 255 letters, numbers, or hyphens.

    • First character must be a letter.

    • Can't end with a hyphen or contain two consecutive hyphens.

    " + }, + "DeleteAutomatedBackups":{ + "shape":"BooleanOptional", + "documentation":"

    A value that indicates whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted.

    " } }, "documentation":"

    " @@ -7797,19 +8020,19 @@ "members":{ "BlueGreenDeploymentIdentifier":{ "shape":"BlueGreenDeploymentIdentifier", - "documentation":"

    The blue/green deployment identifier. If this parameter is specified, information from only the specific blue/green deployment is returned. This parameter isn't case-sensitive.

    Constraints:

    • If supplied, must match an existing blue/green deployment identifier.

    " + "documentation":"

    The blue/green deployment identifier. If you specify this parameter, the response only includes information about the specific blue/green deployment. This parameter isn't case-sensitive.

    Constraints:

    • Must match an existing blue/green deployment identifier.

    " }, "Filters":{ "shape":"FilterList", - "documentation":"

    A filter that specifies one or more blue/green deployments to describe.

    Supported filters:

    • blue-green-deployment-identifier - Accepts system-generated identifiers for blue/green deployments. The results list only includes information about the blue/green deployments with the specified identifiers.

    • blue-green-deployment-name - Accepts user-supplied names for blue/green deployments. The results list only includes information about the blue/green deployments with the specified names.

    • source - Accepts source databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified source databases.

    • target - Accepts target databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified target databases.

    " + "documentation":"

    A filter that specifies one or more blue/green deployments to describe.

    Valid Values:

    • blue-green-deployment-identifier - Accepts system-generated identifiers for blue/green deployments. The results list only includes information about the blue/green deployments with the specified identifiers.

    • blue-green-deployment-name - Accepts user-supplied names for blue/green deployments. The results list only includes information about the blue/green deployments with the specified names.

    • source - Accepts source databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified source databases.

    • target - Accepts target databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified target databases.

    " }, "Marker":{ "shape":"String", - "documentation":"

    An optional pagination token provided by a previous DescribeBlueGreenDeployments request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

    " + "documentation":"

    An optional pagination token provided by a previous DescribeBlueGreenDeployments request. If you specify this parameter, the response only includes records beyond the marker, up to the value specified by MaxRecords.

    " }, "MaxRecords":{ "shape":"MaxRecords", - "documentation":"

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    " + "documentation":"

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.

    Default: 100

    Constraints:

    • Must be a minimum of 20.

    • Can't exceed 100.

    " } } }, @@ -7818,11 +8041,11 @@ "members":{ "BlueGreenDeployments":{ "shape":"BlueGreenDeploymentList", - "documentation":"

    Contains a list of blue/green deployments for the user.

    " + "documentation":"

    A list of blue/green deployments in the current account and Amazon Web Services Region.

    " }, "Marker":{ "shape":"String", - "documentation":"

    A pagination token that can be used in a later DescribeBlueGreenDeployments request.

    " + "documentation":"

    A pagination token that can be used in a later DescribeBlueGreenDeployments request.

    " } } }, @@ -7848,6 +8071,31 @@ }, "documentation":"

    " }, + "DescribeDBClusterAutomatedBackupsMessage":{ + "type":"structure", + "members":{ + "DbClusterResourceId":{ + "shape":"String", + "documentation":"

    The resource ID of the DB cluster that is the source of the automated backup. This parameter isn't case-sensitive.

    " + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

    (Optional) The user-supplied DB cluster identifier. If this parameter is specified, it must match the identifier of an existing DB cluster. It returns information from the specific DB cluster's automated backup. This parameter isn't case-sensitive.

    " + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

    A filter that specifies which resources to return based on status.

    Supported filters are the following:

    • status

      • retained - Automated backups for deleted clusters and after backup replication is stopped.

    • db-cluster-id - Accepts DB cluster identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB cluster automated backups identified by these ARNs.

    • db-cluster-resource-id - Accepts DB resource identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB cluster resources identified by these ARNs.

    Returns all resources by default. The status for each resource is specified in the response.

    " + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results.

    " + }, + "Marker":{ + "shape":"String", + "documentation":"

    The pagination token provided in the previous request. If this parameter is specified, the response includes only records beyond the marker, up to MaxRecords.

    " + } + } + }, "DescribeDBClusterBacktracksMessage":{ "type":"structure", "required":["DBClusterIdentifier"], @@ -8000,6 +8248,10 @@ "IncludePublic":{ "shape":"Boolean", "documentation":"

    A value that indicates whether to include manual DB cluster snapshots that are public and can be copied or restored by any Amazon Web Services account. By default, the public snapshots are not included.

    You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute API action.

    " + }, + "DbClusterResourceId":{ + "shape":"String", + "documentation":"

    A specific DB cluster resource ID to describe.

    " } }, "documentation":"

    " @@ -8084,11 +8336,11 @@ }, "DBInstanceIdentifier":{ "shape":"String", - "documentation":"

    (Optional) The user-supplied instance identifier. If this parameter is specified, it must match the identifier of an existing DB instance. It returns information from the specific DB instance' automated backup. This parameter isn't case-sensitive.

    " + "documentation":"

    (Optional) The user-supplied instance identifier. If this parameter is specified, it must match the identifier of an existing DB instance. It returns information from the specific DB instance's automated backup. This parameter isn't case-sensitive.

    " }, "Filters":{ "shape":"FilterList", - "documentation":"

    A filter that specifies which resources to return based on status.

    Supported filters are the following:

    • status

      • active - automated backups for current instances

      • retained - automated backups for deleted instances and after backup replication is stopped

      • creating - automated backups that are waiting for the first automated snapshot to be available

    • db-instance-id - Accepts DB instance identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance automated backups identified by these ARNs.

    • dbi-resource-id - Accepts DB resource identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance resources identified by these ARNs.

    Returns all resources by default. The status for each resource is specified in the response.

    " + "documentation":"

    A filter that specifies which resources to return based on status.

    Supported filters are the following:

    • status

      • active - Automated backups for current instances.

      • creating - Automated backups that are waiting for the first automated snapshot to be available.

      • retained - Automated backups for deleted instances and after backup replication is stopped.

    • db-instance-id - Accepts DB instance identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance automated backups identified by these ARNs.

    • dbi-resource-id - Accepts DB resource identifiers and Amazon Resource Names (ARNs). The results list includes only information about the DB instance resources identified by these ARNs.

    Returns all resources by default. The status for each resource is specified in the response.

    " }, "MaxRecords":{ "shape":"IntegerOptional", @@ -9837,6 +10089,18 @@ }, "exception":true }, + "InvalidDBClusterAutomatedBackupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The automated backup is in an invalid state. For example, this automated backup is associated with an active cluster.

    ", + "error":{ + "code":"InvalidDBClusterAutomatedBackupStateFault", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidDBClusterCapacityFault":{ "type":"structure", "members":{ @@ -10162,6 +10426,16 @@ }, "documentation":"

    " }, + "LocalWriteForwardingStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled", + "enabling", + "disabling", + "requested" + ] + }, "LogTypeList":{ "type":"list", "member":{"shape":"String"} @@ -10506,6 +10780,10 @@ "AllowEngineModeChange":{ "shape":"Boolean", "documentation":"

    Specifies whether engine mode changes from serverless to provisioned are allowed.

    Valid for Cluster Type: Aurora Serverless v1 DB clusters only

    Constraints:

    • You must allow engine mode changes when specifying a different value for the EngineMode parameter from the DB cluster's current engine mode.

    " + }, + "EnableLocalWriteForwarding":{ + "shape":"BooleanOptional", + "documentation":"

    Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.

    Valid for: Aurora DB clusters only

    " } }, "documentation":"

    " @@ -10664,7 +10942,7 @@ }, "CACertificateIdentifier":{ "shape":"String", - "documentation":"

    The CA certificate identifier to use for the DB instance6's server certificate.

    This setting doesn't apply to RDS Custom DB instances.

    For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

    " + "documentation":"

    The CA certificate identifier to use for the DB instance's server certificate.

    This setting doesn't apply to RDS Custom DB instances.

    For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

    " }, "Domain":{ "shape":"String", @@ -11059,23 +11337,23 @@ "members":{ "GlobalClusterIdentifier":{ "shape":"String", - "documentation":"

    The DB cluster identifier for the global cluster being modified. This parameter isn't case-sensitive.

    Constraints:

    • Must match the identifier of an existing global database cluster.

    " + "documentation":"

    The cluster identifier for the global cluster to modify. This parameter isn't case-sensitive.

    Constraints:

    • Must match the identifier of an existing global database cluster.

    " }, "NewGlobalClusterIdentifier":{ "shape":"String", - "documentation":"

    The new cluster identifier for the global database cluster when modifying a global database cluster. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 letters, numbers, or hyphens

    • The first character must be a letter

    • Can't end with a hyphen or contain two consecutive hyphens

    Example: my-cluster2

    " + "documentation":"

    The new cluster identifier for the global database cluster. This value is stored as a lowercase string.

    Constraints:

    • Must contain from 1 to 63 letters, numbers, or hyphens.

    • The first character must be a letter.

    • Can't end with a hyphen or contain two consecutive hyphens.

    Example: my-cluster2

    " }, "DeletionProtection":{ "shape":"BooleanOptional", - "documentation":"

    Indicates if the global database cluster has deletion protection enabled. The global database cluster can't be deleted when deletion protection is enabled.

    " + "documentation":"

    Specifies whether to enable deletion protection for the global database cluster. The global database cluster can't be deleted when deletion protection is enabled.

    " }, "EngineVersion":{ "shape":"String", - "documentation":"

    The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled.

    To list all of the available engine versions for aurora-mysql (for MySQL-based Aurora global databases), use the following command:

    aws rds describe-db-engine-versions --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'

    To list all of the available engine versions for aurora-postgresql (for PostgreSQL-based Aurora global databases), use the following command:

    aws rds describe-db-engine-versions --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'

    " + "documentation":"

    The version number of the database engine to which you want to upgrade.

    To list all of the available engine versions for aurora-mysql (for MySQL-based Aurora global databases), use the following command:

    aws rds describe-db-engine-versions --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'

    To list all of the available engine versions for aurora-postgresql (for PostgreSQL-based Aurora global databases), use the following command:

    aws rds describe-db-engine-versions --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'

    " }, "AllowMajorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

    A value that indicates whether major version upgrades are allowed.

    Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version.

    If you upgrade the major version of a global database, the cluster and DB instance parameter groups are set to the default parameter groups for the new version. Apply any custom parameter groups after completing the upgrade.

    " + "documentation":"

    Specifies whether to allow major version upgrades.

    Constraints: Must be enabled if you specify a value for the EngineVersion parameter that's a different major version than the global cluster's current version.

    If you upgrade the major version of a global database, the cluster and DB instance parameter groups are set to the default parameter groups for the new version. Apply any custom parameter groups after completing the upgrade.

    " } } }, @@ -12888,10 +13166,7 @@ }, "RestoreDBClusterToPointInTimeMessage":{ "type":"structure", - "required":[ - "DBClusterIdentifier", - "SourceDBClusterIdentifier" - ], + "required":["DBClusterIdentifier"], "members":{ "DBClusterIdentifier":{ "shape":"String", @@ -12994,6 +13269,10 @@ "NetworkType":{ "shape":"String", "documentation":"

    The network type of the DB cluster.

    Valid values:

    • IPV4

    • DUAL

    The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

    For more information, see Working with a DB instance in a VPC in the Amazon Aurora User Guide.

    Valid for: Aurora DB clusters only

    " + }, + "SourceDbClusterResourceId":{ + "shape":"String", + "documentation":"

    The resource ID of the source DB cluster from which to restore.

    " } }, "documentation":"

    " @@ -14249,11 +14528,11 @@ "members":{ "BlueGreenDeploymentIdentifier":{ "shape":"BlueGreenDeploymentIdentifier", - "documentation":"

    The blue/green deployment identifier.

    Constraints:

    • Must match an existing blue/green deployment identifier.

    " + "documentation":"

    The unique identifier of the blue/green deployment.

    Constraints:

    • Must match an existing blue/green deployment identifier.

    " }, "SwitchoverTimeout":{ "shape":"SwitchoverTimeout", - "documentation":"

    The amount of time, in seconds, for the switchover to complete. The default is 300.

    If the switchover takes longer than the specified duration, then any changes are rolled back, and no changes are made to the environments.

    " + "documentation":"

    The amount of time, in seconds, for the switchover to complete.

    Default: 300

    If the switchover takes longer than the specified duration, then any changes are rolled back, and no changes are made to the environments.

    " } } }, @@ -14465,6 +14744,10 @@ "SupportsBabelfish":{ "shape":"BooleanOptional", "documentation":"

    A value that indicates whether you can use Babelfish for Aurora PostgreSQL with the target engine version.

    " + }, + "SupportsLocalWriteForwarding":{ + "shape":"BooleanOptional", + "documentation":"

    A value that indicates whether the target engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.

    Valid for: Aurora DB clusters only

    " } }, "documentation":"

    The version of the database engine that a DB instance can be upgraded to.

    " diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index 54af0b52d5e..6259decdc6d 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index 2e02a399004..d1a33631b3d 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index 6333d14130d..2a37d4d50ab 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/redshiftserverless/pom.xml b/services/redshiftserverless/pom.xml index a467bfd777a..0795179ed93 100644 --- a/services/redshiftserverless/pom.xml +++ b/services/redshiftserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT redshiftserverless AWS Java SDK :: Services :: Redshift Serverless diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index 445d37737d9..f5864f27140 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/rekognition/src/main/resources/codegen-resources/endpoint-rule-set.json index 3972626b553..c30bffadd5f 100644 --- a/services/rekognition/src/main/resources/codegen-resources/endpoint-rule-set.json +++ 
b/services/rekognition/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": 
"booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://rekognition-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://rekognition-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://rekognition-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": 
"https://rekognition-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://rekognition.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://rekognition.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://rekognition.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://rekognition.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: 
Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/rekognition/src/main/resources/codegen-resources/service-2.json b/services/rekognition/src/main/resources/codegen-resources/service-2.json index 2aff6686e93..e23d8110f3d 100644 --- a/services/rekognition/src/main/resources/codegen-resources/service-2.json +++ b/services/rekognition/src/main/resources/codegen-resources/service-2.json @@ -112,7 +112,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset.

    To create a training dataset for a project, specify train for the value of DatasetType. To create the test dataset for a project, specify test for the value of DatasetType.

    The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE.

    To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines.

    Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information.

    For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide.

    This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.

    " + "documentation":"

    Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset.

    To create a training dataset for a project, specify TRAIN for the value of DatasetType. To create the test dataset for a project, specify TEST for the value of DatasetType.

    The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE.

    To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines.

    Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information.

    For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide.

    This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.

    " }, "CreateFaceLivenessSession":{ "name":"CreateFaceLivenessSession", @@ -129,7 +129,7 @@ {"shape":"ThrottlingException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

    This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. You can use AuditImagesLimit to limit the number of audit images returned. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video.

    ", + "documentation":"

    This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session.

    You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. If no Amazon S3 bucket is defined, raw bytes are sent instead.

    You can use AuditImagesLimit to limit the number of audit images returned when GetFaceLivenessSessionResults is called. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video.

    ", "idempotent":true }, "CreateProject":{ @@ -517,7 +517,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

    Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature.

    For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide.

    You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

    Optional Parameters

    You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color.

    When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image.

    You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels.

    Response Elements

    For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels:

    • Name - The name of the detected label.

    • Confidence - The level of confidence in the label assigned to a detected object.

    • Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response.

    • Aliases - Possible Aliases for the label.

    • Categories - The label categories that the detected label belongs to.

    • BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box.

    The API returns the following information regarding the image, as part of the ImageProperties structure:

    • Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground.

    • Dominant Color - An array of the dominant colors in the image.

    • Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground.

    • Background - Information about the sharpness, brightness, and dominant colors of the input image’s background.

    The list of returned labels will include at least one label for every detected object, along with information about that label. In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label:

    {Name: lighthouse, Confidence: 98.4629}

    {Name: rock,Confidence: 79.2097}

    {Name: sea,Confidence: 75.061}

    The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels.

    {Name: flower,Confidence: 99.0562}

    {Name: plant,Confidence: 99.0562}

    {Name: tulip,Confidence: 99.0562}

    In this example, the detection algorithm more precisely identifies the flower as a tulip.

    If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides.

    This is a stateless API operation that doesn't return any data.

    This operation requires permissions to perform the rekognition:DetectLabels action.

    " + "documentation":"

    Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature.

    For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide.

    You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

    Optional Parameters

    You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color.

    When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image.

    When getting labels, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. These arguments are only valid when supplying GENERAL_LABELS as a feature type.

    Response Elements

    For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels:

    • Name - The name of the detected label.

    • Confidence - The level of confidence in the label assigned to a detected object.

    • Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response.

    • Aliases - Possible Aliases for the label.

    • Categories - The label categories that the detected label belongs to.

    • BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box.

    The API returns the following information regarding the image, as part of the ImageProperties structure:

    • Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground.

    • Dominant Color - An array of the dominant colors in the image.

    • Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground.

    • Background - Information about the sharpness, brightness, and dominant colors of the input image’s background.

    The list of returned labels will include at least one label for every detected object, along with information about that label. In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label:

    {Name: lighthouse, Confidence: 98.4629}

    {Name: rock,Confidence: 79.2097}

    {Name: sea,Confidence: 75.061}

    The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels.

    {Name: flower,Confidence: 99.0562}

    {Name: plant,Confidence: 99.0562}

    {Name: tulip,Confidence: 99.0562}

    In this example, the detection algorithm more precisely identifies the flower as a tulip.

    If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides.

    This is a stateless API operation that doesn't return any data.

    This operation requires permissions to perform the rekognition:DetectLabels action.

    " }, "DetectModerationLabels":{ "name":"DetectModerationLabels", @@ -692,7 +692,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Gets face detection results for a Amazon Rekognition Video analysis started by StartFaceDetection.

    Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection.

    GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected.

    Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.

    " + "documentation":"

    Gets face detection results for an Amazon Rekognition Video analysis started by StartFaceDetection.

    Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection.

    GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected.

    Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.

    Note that for the GetFaceDetection operation, the returned values for FaceOccluded and EyeDirection will always be \"null\".

    " }, "GetFaceLivenessSessionResults":{ "name":"GetFaceLivenessSessionResults", @@ -710,7 +710,7 @@ {"shape":"ThrottlingException"}, {"shape":"ProvisionedThroughputExceededException"} ], - "documentation":"

    Retrieves the results of a specific Face Liveness session. It requires the sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. The reference image can optionally be returned.

    " + "documentation":"

    Retrieves the results of a specific Face Liveness session. It requires the sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100.

    The number of audit images returned by GetFaceLivenessSessionResults is defined by the AuditImagesLimit paramater when calling CreateFaceLivenessSession. Reference images are always returned when possible.

    " }, "GetFaceSearch":{ "name":"GetFaceSearch", @@ -805,7 +805,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Gets the text detection results of a Amazon Rekognition Video analysis started by StartTextDetection.

    Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by calling StartTextDetection which returns a job identifier (JobId) When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartTextDetection. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetTextDetection and pass the job identifier (JobId) from the initial call of StartLabelDetection.

    GetTextDetection returns an array of detected text (TextDetections) sorted by the time the text was detected, up to 50 words per frame of video.

    Each element of the array includes the detected text, the precentage confidence in the acuracy of the detected text, the time the text was detected, bounding box information for where the text was located, and unique identifiers for words and their lines.

    Use MaxResults parameter to limit the number of text detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetTextDetection and populate the NextToken request parameter with the token value returned from the previous call to GetTextDetection.

    " + "documentation":"

    Gets the text detection results of an Amazon Rekognition Video analysis started by StartTextDetection.

    Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by calling StartTextDetection which returns a job identifier (JobId). When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartTextDetection. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetTextDetection and pass the job identifier (JobId) from the initial call of StartTextDetection.

    GetTextDetection returns an array of detected text (TextDetections) sorted by the time the text was detected, up to 100 words per frame of video.

    Each element of the array includes the detected text, the percentage confidence in the accuracy of the detected text, the time the text was detected, bounding box information for where the text was located, and unique identifiers for words and their lines.

    Use MaxResults parameter to limit the number of text detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetTextDetection and populate the NextToken request parameter with the token value returned from the previous call to GetTextDetection.

    " }, "IndexFaces":{ "name":"IndexFaces", @@ -2113,7 +2113,7 @@ }, "DatasetType":{ "shape":"DatasetType", - "documentation":"

    The type of the dataset. Specify train to create a training dataset. Specify test to create a test dataset.

    " + "documentation":"

    The type of the dataset. Specify TRAIN to create a training dataset. Specify TEST to create a test dataset.

    " }, "ProjectArn":{ "shape":"ProjectArn", @@ -2943,7 +2943,7 @@ }, "Attributes":{ "shape":"Attributes", - "documentation":"

    An array of facial attributes you want to be returned. A DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using [\"DEFAULT\", \"FACE_OCCLUDED\"] or just [\"FACE_OCCLUDED\"]. You can request for all facial attributes by using [\"ALL\"]. Requesting more attributes may increase response time.

    If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical \"AND\" operator to determine which attributes to return (in this case, all attributes).

    " + "documentation":"

    An array of facial attributes you want to be returned. A DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using [\"DEFAULT\", \"FACE_OCCLUDED\"] or just [\"FACE_OCCLUDED\"]. You can request for all facial attributes by using [\"ALL\"]. Requesting more attributes may increase response time.

    If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical \"AND\" operator to determine which attributes to return (in this case, all attributes).

    Note that while the FaceOccluded and EyeDirection attributes are supported when using DetectFaces, they aren't supported when analyzing videos with StartFaceDetection and GetFaceDetection.

    " } } }, @@ -3066,11 +3066,11 @@ }, "MaxLabels":{ "shape":"UInteger", - "documentation":"

    Maximum number of labels you want the service to return in the response. The service returns the specified number of highest confidence labels.

    " + "documentation":"

    Maximum number of labels you want the service to return in the response. The service returns the specified number of highest confidence labels. Only valid when GENERAL_LABELS is specified as a feature type in the Feature input parameter.

    " }, "MinConfidence":{ "shape":"Percent", - "documentation":"

    Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value.

    If MinConfidence is not specified, the operation returns labels with a confidence values greater than or equal to 55 percent.

    " + "documentation":"

    Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value.

    If MinConfidence is not specified, the operation returns labels with a confidence values greater than or equal to 55 percent. Only valid when GENERAL_LABELS is specified as a feature type in the Feature input parameter.

    " }, "Features":{ "shape":"DetectLabelsFeatureList", @@ -4049,7 +4049,7 @@ }, "AuditImages":{ "shape":"AuditImages", - "documentation":"

    A set of images from the Face Liveness video that can be used for audit purposes. It includes a bounding box of the face and the Base64-encoded bytes that return an image. If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to an S3Object specified in the output configuration.

    " + "documentation":"

    A set of images from the Face Liveness video that can be used for audit purposes. It includes a bounding box of the face and the Base64-encoded bytes that return an image. If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to an S3Object specified in the output configuration. If no Amazon S3 bucket is defined, raw bytes are sent instead.

    " } } }, @@ -5051,11 +5051,11 @@ }, "UserId":{ "shape":"UserId", - "documentation":"

    An array of user IDs to match when listing faces in a collection.

    " + "documentation":"

    An array of user IDs to filter results with when listing faces in a collection.

    " }, "FaceIds":{ "shape":"FaceIdList", - "documentation":"

    An array of face IDs to match when listing faces in a collection.

    " + "documentation":"

    An array of face IDs to filter results with when listing faces in a collection.

    " } } }, diff --git a/services/resiliencehub/pom.xml b/services/resiliencehub/pom.xml index ee697621315..33b5841bb5d 100644 --- a/services/resiliencehub/pom.xml +++ b/services/resiliencehub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT resiliencehub AWS Java SDK :: Services :: Resiliencehub diff --git a/services/resiliencehub/src/main/resources/codegen-resources/paginators-1.json b/services/resiliencehub/src/main/resources/codegen-resources/paginators-1.json index ce7bf05b483..6d700c46674 100644 --- a/services/resiliencehub/src/main/resources/codegen-resources/paginators-1.json +++ b/services/resiliencehub/src/main/resources/codegen-resources/paginators-1.json @@ -5,6 +5,11 @@ "output_token": "nextToken", "limit_key": "maxResults" }, + "ListAppAssessmentComplianceDrifts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults" + }, "ListAppAssessments": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/resiliencehub/src/main/resources/codegen-resources/service-2.json b/services/resiliencehub/src/main/resources/codegen-resources/service-2.json index 87a985a8fb7..15ae6ad1aee 100644 --- a/services/resiliencehub/src/main/resources/codegen-resources/service-2.json +++ b/services/resiliencehub/src/main/resources/codegen-resources/service-2.json @@ -31,6 +31,24 @@ ], "documentation":"

    Adds the resource mapping for the draft application version. You can also update an existing resource mapping to a new physical resource.

    " }, + "BatchUpdateRecommendationStatus":{ + "name":"BatchUpdateRecommendationStatus", + "http":{ + "method":"POST", + "requestUri":"/batch-update-recommendation-status", + "responseCode":200 + }, + "input":{"shape":"BatchUpdateRecommendationStatusRequest"}, + "output":{"shape":"BatchUpdateRecommendationStatusResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Enables you to include or exclude one or more operational recommendations.

    " + }, "CreateApp":{ "name":"CreateApp", "http":{ @@ -49,7 +67,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

    Creates an Resilience Hub application. An Resilience Hub application is a collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe an Resilience Hub application, you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate resiliency policy. For more information about the number of resources supported per application, see Service Quotas.

    After you create an Resilience Hub application, you publish it so that you can run a resiliency assessment on it. You can then use recommendations from the assessment to improve resiliency by running another assessment, comparing results, and then iterating the process until you achieve your goals for recovery time objective (RTO) and recovery point objective (RPO).

    " + "documentation":"

    Creates a Resilience Hub application. A Resilience Hub application is a collection of Amazon Web Services resources structured to prevent and recover from Amazon Web Services application disruptions. To describe a Resilience Hub application, you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate resiliency policy. In addition, you can also add resources that are located on Amazon Elastic Kubernetes Service (Amazon EKS) clusters as optional resources. For more information about the number of resources supported per application, see Service quotas.

    After you create a Resilience Hub application, you publish it so that you can run a resiliency assessment on it. You can then use recommendations from the assessment to improve resiliency by running another assessment, comparing results, and then iterating the process until you achieve your goals for recovery time objective (RTO) and recovery point objective (RPO).

    " }, "CreateAppVersionAppComponent":{ "name":"CreateAppVersionAppComponent", @@ -438,6 +456,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} @@ -462,6 +481,23 @@ ], "documentation":"

    Lists the alarm recommendations for an Resilience Hub application.

    " }, + "ListAppAssessmentComplianceDrifts":{ + "name":"ListAppAssessmentComplianceDrifts", + "http":{ + "method":"POST", + "requestUri":"/list-app-assessment-compliance-drifts", + "responseCode":200 + }, + "input":{"shape":"ListAppAssessmentComplianceDriftsRequest"}, + "output":{"shape":"ListAppAssessmentComplianceDriftsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    List of compliance drifts that were detected while running an assessment.

    " + }, "ListAppAssessments":{ "name":"ListAppAssessments", "http":{ @@ -1003,7 +1039,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "resourceMappings":{ "shape":"ResourceMappingList", @@ -1021,7 +1057,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -1029,7 +1065,7 @@ }, "resourceMappings":{ "shape":"ResourceMappingList", - "documentation":"

    Mappings used to map logical resources from the template to physical resources. You can use the mapping type CFN_STACK if the application template uses a logical stack name. Or you can map individual resources by using the mapping type RESOURCE. We recommend using the mapping type CFN_STACK if the application is backed by a CloudFormation stack.

    " + "documentation":"

    List of sources that are used to map a logical resource from the template to a physical resource. You can use sources such as CloudFormation, Terraform state files, AppRegistry applications, or Amazon EKS.

    " } } }, @@ -1055,19 +1091,25 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"

    The Application Component for the CloudWatch alarm recommendation.

    " + "documentation":"

    Application Component name for the CloudWatch alarm recommendation. This name is saved as the first item in the appComponentNames list.

    ", + "deprecated":true, + "deprecatedMessage":"An alarm recommendation can be attached to multiple Application Components, hence this property will be replaced by the new property 'appComponentNames'." + }, + "appComponentNames":{ + "shape":"AppComponentNameList", + "documentation":"

    List of Application Component names for the CloudWatch alarm recommendation.

    " }, "description":{ "shape":"EntityDescription", - "documentation":"

    The description of the recommendation.

    " + "documentation":"

    Description of the alarm recommendation.

    " }, "items":{ "shape":"RecommendationItemList", - "documentation":"

    The list of CloudWatch alarm recommendations.

    " + "documentation":"

    List of CloudWatch alarm recommendations.

    " }, "name":{ "shape":"String500", - "documentation":"

    The name of the alarm recommendation.

    " + "documentation":"

    Name of the alarm recommendation.

    " }, "prerequisite":{ "shape":"String500", @@ -1075,15 +1117,15 @@ }, "recommendationId":{ "shape":"Uuid", - "documentation":"

    The identifier of the alarm recommendation.

    " + "documentation":"

    Identifier of the alarm recommendation.

    " }, "referenceId":{ "shape":"SpecReferenceId", - "documentation":"

    The reference identifier of the alarm recommendation.

    " + "documentation":"

    Reference identifier of the alarm recommendation.

    " }, "type":{ "shape":"AlarmType", - "documentation":"

    The type of alarm recommendation.

    " + "documentation":"

    Type of alarm recommendation.

    " } }, "documentation":"

    Defines a recommendation for a CloudWatch alarm.

    " @@ -1118,51 +1160,67 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "assessmentSchedule":{ "shape":"AppAssessmentScheduleType", - "documentation":"

    Assessment execution schedule with 'Daily' or 'Disabled' values.

    " + "documentation":"

    Assessment execution schedule with 'Daily' or 'Disabled' values.

    " }, "complianceStatus":{ "shape":"AppComplianceStatusType", - "documentation":"

    The current status of compliance for the resiliency policy.

    " + "documentation":"

    Current status of compliance for the resiliency policy.

    " }, "creationTime":{ "shape":"TimeStamp", - "documentation":"

    The timestamp for when the app was created.

    " + "documentation":"

    Timestamp for when the app was created.

    " }, "description":{ "shape":"EntityDescription", - "documentation":"

    The optional description for an app.

    " + "documentation":"

    Optional description for an application.

    " + }, + "driftStatus":{ + "shape":"AppDriftStatusType", + "documentation":"

    Indicates if compliance drifts (deviations) were detected while running an assessment for your application.

    " + }, + "eventSubscriptions":{ + "shape":"EventSubscriptionList", + "documentation":"

    The list of events you would like to subscribe to and get notifications for. Currently, Resilience Hub supports notifications only for Drift detected and Scheduled assessment failure events.

    " }, "lastAppComplianceEvaluationTime":{ "shape":"TimeStamp", - "documentation":"

    The timestamp for the most recent compliance evaluation.

    " + "documentation":"

    Timestamp for the most recent compliance evaluation.

    " + }, + "lastDriftEvaluationTime":{ + "shape":"TimeStamp", + "documentation":"

    Indicates the last time that a drift was evaluated.

    " }, "lastResiliencyScoreEvaluationTime":{ "shape":"TimeStamp", - "documentation":"

    The timestamp for the most recent resiliency score evaluation.

    " + "documentation":"

    Timestamp for the most recent resiliency score evaluation.

    " }, "name":{ "shape":"EntityName", - "documentation":"

    The name for the application.

    " + "documentation":"

    Name for the application.

    " + }, + "permissionModel":{ + "shape":"PermissionModel", + "documentation":"

    Defines the roles and credentials that Resilience Hub would use while creating the application, importing its resources, and running an assessment.

    " }, "policyArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "resiliencyScore":{ "shape":"Double", - "documentation":"

    The current resiliency score for the application.

    " + "documentation":"

    Current resiliency score for the application.

    " }, "status":{ "shape":"AppStatusType", - "documentation":"

    The status of the application.

    " + "documentation":"

    Status of the application.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + "documentation":"

    Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " } }, "documentation":"

    Defines an Resilience Hub application.

    " @@ -1177,39 +1235,43 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The version of the application.

    " + "documentation":"

    Version of an application.

    " }, "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "assessmentName":{ "shape":"EntityName", - "documentation":"

    The name of the assessment.

    " + "documentation":"

    Name of the assessment.

    " }, "assessmentStatus":{ "shape":"AssessmentStatus", - "documentation":"

    The current status of the assessment for the resiliency policy.

    " + "documentation":"

    Current status of the assessment for the resiliency policy.

    " }, "compliance":{ "shape":"AssessmentCompliance", - "documentation":"

    The application compliance against the resiliency policy.

    " + "documentation":"

    Application compliance against the resiliency policy.

    " }, "complianceStatus":{ "shape":"ComplianceStatus", - "documentation":"

    The current status of the compliance for the resiliency policy.

    " + "documentation":"

    Current status of the compliance for the resiliency policy.

    " }, "cost":{ "shape":"Cost", - "documentation":"

    The cost for the application.

    " + "documentation":"

    Cost for the application.

    " + }, + "driftStatus":{ + "shape":"DriftStatus", + "documentation":"

    Indicates if compliance drifts (deviations) were detected while running an assessment for your application.

    " }, "endTime":{ "shape":"TimeStamp", - "documentation":"

    The end time for the action.

    " + "documentation":"

    End time for the action.

    " }, "invoker":{ "shape":"AssessmentInvoker", @@ -1221,11 +1283,11 @@ }, "policy":{ "shape":"ResiliencyPolicy", - "documentation":"

    The resiliency policy.

    " + "documentation":"

    Resiliency policy of an application.

    " }, "resiliencyScore":{ "shape":"ResiliencyScore", - "documentation":"

    The current resiliency score for the application.

    " + "documentation":"

    Current resiliency score for an application.

    " }, "resourceErrorsDetails":{ "shape":"ResourceErrorsDetails", @@ -1233,11 +1295,15 @@ }, "startTime":{ "shape":"TimeStamp", - "documentation":"

    The starting time for the action.

    " + "documentation":"

    Starting time for the action.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + "documentation":"

    Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + }, + "versionName":{ + "shape":"EntityVersion", + "documentation":"

    Version name of the published application.

    " } }, "documentation":"

    Defines an application assessment.

    " @@ -1258,51 +1324,59 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The version of the application.

    " + "documentation":"

    Version of an application.

    " }, "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "assessmentName":{ "shape":"EntityName", - "documentation":"

    The name of the assessment.

    " + "documentation":"

    Name of the assessment.

    " }, "assessmentStatus":{ "shape":"AssessmentStatus", - "documentation":"

    The current status of the assessment for the resiliency policy.

    " + "documentation":"

    Current status of the assessment for the resiliency policy.

    " }, "complianceStatus":{ "shape":"ComplianceStatus", - "documentation":"

    The current status of compliance for the resiliency policy.

    " + "documentation":"

    Current status of compliance for the resiliency policy.

    " }, "cost":{ "shape":"Cost", - "documentation":"

    The cost for the application.

    " + "documentation":"

    Cost for an application.

    " + }, + "driftStatus":{ + "shape":"DriftStatus", + "documentation":"

    Indicates if compliance drifts (deviations) were detected while running an assessment for your application.

    " }, "endTime":{ "shape":"TimeStamp", - "documentation":"

    The end time for the action.

    " + "documentation":"

    End time for the action.

    " }, "invoker":{ "shape":"AssessmentInvoker", - "documentation":"

    The entity that invoked the assessment.

    " + "documentation":"

    Entity that invoked the assessment.

    " }, "message":{ "shape":"String500", - "documentation":"

    The message from the assessment run.

    " + "documentation":"

    Message from the assessment run.

    " }, "resiliencyScore":{ "shape":"Double", - "documentation":"

    The current resiliency score for the application.

    " + "documentation":"

    Current resiliency score for the application.

    " }, "startTime":{ "shape":"TimeStamp", - "documentation":"

    The starting time for the action.

    " + "documentation":"

    Starting time for the action.

    " + }, + "versionName":{ + "shape":"EntityVersion", + "documentation":"

    Name of an application version.

    " } }, "documentation":"

    Defines an application assessment summary.

    " @@ -1333,11 +1407,11 @@ }, "id":{ "shape":"String255", - "documentation":"

    Unique identifier of the Application Component.

    " + "documentation":"

    Identifier of the Application Component.

    " }, "name":{ "shape":"String255", - "documentation":"

    The name of the Application Component.

    " + "documentation":"

    Name of the Application Component.

    " }, "type":{ "shape":"String255", @@ -1351,7 +1425,7 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"

    The name of the Application Component.

    " + "documentation":"

    Name of the Application Component.

    " }, "compliance":{ "shape":"AssessmentCompliance", @@ -1371,7 +1445,7 @@ }, "status":{ "shape":"ComplianceStatus", - "documentation":"

    The status of the action.

    " + "documentation":"

    Status of the action.

    " } }, "documentation":"

    Defines the compliance of an Application Component against the resiliency policy.

    " @@ -1384,6 +1458,14 @@ "type":"list", "member":{"shape":"String255"} }, + "AppDriftStatusType":{ + "type":"string", + "enum":[ + "NotChecked", + "NotDetected", + "Detected" + ] + }, "AppInputSource":{ "type":"structure", "required":["importType"], @@ -1436,7 +1518,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "assessmentSchedule":{ "shape":"AppAssessmentScheduleType", @@ -1454,6 +1536,10 @@ "shape":"EntityDescription", "documentation":"

    The optional description for an app.

    " }, + "driftStatus":{ + "shape":"AppDriftStatusType", + "documentation":"

    Indicates if compliance drifts (deviations) were detected while running an assessment for your application.

    " + }, "name":{ "shape":"EntityName", "documentation":"

    The name of the application.

    " @@ -1464,7 +1550,7 @@ }, "status":{ "shape":"AppStatusType", - "documentation":"

    The status of the application.

    " + "documentation":"

    Status of the application.

    " } }, "documentation":"

    Defines an application summary.

    " @@ -1489,10 +1575,22 @@ "members":{ "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The version of the application.

    " + "documentation":"

    Version of an application.

    " + }, + "creationTime":{ + "shape":"TimeStamp", + "documentation":"

    Creation time of the application version.

    " + }, + "identifier":{ + "shape":"LongOptional", + "documentation":"

    Identifier of the application version.

    " + }, + "versionName":{ + "shape":"EntityVersion", + "documentation":"

    Name of the application version.

    " } }, - "documentation":"

    The version of the application.

    " + "documentation":"

    Version of an application.

    " }, "Arn":{ "type":"string", @@ -1533,6 +1631,103 @@ "type":"string", "pattern":"^[a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]$" }, + "BatchUpdateRecommendationStatusFailedEntries":{ + "type":"list", + "member":{"shape":"BatchUpdateRecommendationStatusFailedEntry"} + }, + "BatchUpdateRecommendationStatusFailedEntry":{ + "type":"structure", + "required":[ + "entryId", + "errorMessage" + ], + "members":{ + "entryId":{ + "shape":"String255", + "documentation":"

    An identifier of an entry in this batch that is used to communicate the result.

    The entryIds of a batch request need to be unique within a request.

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    Indicates the error that occurred while excluding an operational recommendation.

    " + } + }, + "documentation":"

    List of operational recommendations that did not get included or excluded.

    " + }, + "BatchUpdateRecommendationStatusRequest":{ + "type":"structure", + "required":[ + "appArn", + "requestEntries" + ], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + }, + "requestEntries":{ + "shape":"UpdateRecommendationStatusRequestEntries", + "documentation":"

    Defines the list of operational recommendations that need to be included or excluded.

    " + } + } + }, + "BatchUpdateRecommendationStatusResponse":{ + "type":"structure", + "required":[ + "appArn", + "failedEntries", + "successfulEntries" + ], + "members":{ + "appArn":{ + "shape":"Arn", + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + }, + "failedEntries":{ + "shape":"BatchUpdateRecommendationStatusFailedEntries", + "documentation":"

    A list of items with error details about each item, which could not be included or excluded.

    " + }, + "successfulEntries":{ + "shape":"BatchUpdateRecommendationStatusSuccessfulEntries", + "documentation":"

    A list of items that were included or excluded.

    " + } + } + }, + "BatchUpdateRecommendationStatusSuccessfulEntries":{ + "type":"list", + "member":{"shape":"BatchUpdateRecommendationStatusSuccessfulEntry"} + }, + "BatchUpdateRecommendationStatusSuccessfulEntry":{ + "type":"structure", + "required":[ + "entryId", + "excluded", + "item", + "referenceId" + ], + "members":{ + "entryId":{ + "shape":"String255", + "documentation":"

    An identifier for an entry in this batch that is used to communicate the result.

    The entryIds of a batch request need to be unique within a request.

    " + }, + "excludeReason":{ + "shape":"ExcludeRecommendationReason", + "documentation":"

    Indicates the reason for excluding an operational recommendation.

    " + }, + "excluded":{ + "shape":"BooleanOptional", + "documentation":"

    Indicates if the operational recommendation was successfully excluded.

    " + }, + "item":{ + "shape":"UpdateRecommendationStatusItem", + "documentation":"

    The operational recommendation item.

    " + }, + "referenceId":{ + "shape":"SpecReferenceId", + "documentation":"

    Reference identifier of the operational recommendation.

    " + } + }, + "documentation":"

    List of operational recommendations that were successfully included or excluded.

    " + }, "BooleanOptional":{ "type":"boolean", "box":true @@ -1543,6 +1738,56 @@ "min":1, "pattern":"^[A-za-z0-9_.-]{0,63}$" }, + "ComplianceDrift":{ + "type":"structure", + "members":{ + "actualReferenceId":{ + "shape":"String255", + "documentation":"

    Assessment identifier that is associated with this drift item.

    " + }, + "actualValue":{ + "shape":"AssessmentCompliance", + "documentation":"

    Actual compliance value of the entity.

    " + }, + "appId":{ + "shape":"String255", + "documentation":"

    Identifier of your application.

    " + }, + "appVersion":{ + "shape":"String255", + "documentation":"

    Published version of your application on which drift was detected.

    " + }, + "diffType":{ + "shape":"DifferenceType", + "documentation":"

    Difference type between actual and expected recovery point objective (RPO) and recovery time objective (RTO) values. Currently, Resilience Hub supports only NotEqual difference type.

    " + }, + "driftType":{ + "shape":"DriftType", + "documentation":"

    The type of drift detected. Currently, Resilience Hub supports only ApplicationCompliance drift type.

    " + }, + "entityId":{ + "shape":"String255", + "documentation":"

    Identifier of an entity in which drift was detected. For compliance drift, the entity ID can be either application ID or the AppComponent ID.

    " + }, + "entityType":{ + "shape":"String255", + "documentation":"

    The type of entity in which drift was detected. For compliance drifts, Resilience Hub supports AWS::ResilienceHub::AppComponent and AWS::ResilienceHub::Application.

    " + }, + "expectedReferenceId":{ + "shape":"String255", + "documentation":"

    Assessment identifier of a previous assessment of the same application version. Resilience Hub uses the previous assessment (associated with the reference identifier) to compare the compliance with the current assessment to identify drifts.

    " + }, + "expectedValue":{ + "shape":"AssessmentCompliance", + "documentation":"

    The expected compliance value of an entity.

    " + } + }, + "documentation":"

    Indicates the compliance drifts (recovery time objective (RTO) and recovery point objective (RPO)) that were detected for an assessed entity.

    " + }, + "ComplianceDriftList":{ + "type":"list", + "member":{"shape":"ComplianceDrift"} + }, "ComplianceStatus":{ "type":"string", "enum":[ @@ -1564,15 +1809,15 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"

    The name of the Application Component.

    " + "documentation":"

    Name of the Application Component.

    " }, "configRecommendations":{ "shape":"ConfigRecommendationList", - "documentation":"

    The list of recommendations.

    " + "documentation":"

    List of recommendations.

    " }, "recommendationStatus":{ "shape":"RecommendationComplianceStatus", - "documentation":"

    The recommendation status.

    " + "documentation":"

    Status of the recommendation.

    " } }, "documentation":"

    Defines recommendations for an Resilience Hub Application Component, returned as an object. This object contains component names, configuration recommendations, and recommendation statuses.

    " @@ -1591,7 +1836,7 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"

    The name of the Application Component.

    " + "documentation":"

    Name of the Application Component.

    " }, "compliance":{ "shape":"AssessmentCompliance", @@ -1623,14 +1868,14 @@ }, "referenceId":{ "shape":"SpecReferenceId", - "documentation":"

    The reference identifier for the recommendation configuration.

    " + "documentation":"

    Reference identifier for the recommendation configuration.

    " }, "suggestedChanges":{ "shape":"SuggestedChangesList", "documentation":"

    List of the suggested configuration changes.

    " } }, - "documentation":"

    Defines a configuration recommendation.

    " + "documentation":"

    Defines a recommendation configuration.

    " }, "ConfigRecommendationList":{ "type":"list", @@ -1716,17 +1961,25 @@ "shape":"EntityDescription", "documentation":"

    The optional description for an app.

    " }, + "eventSubscriptions":{ + "shape":"EventSubscriptionList", + "documentation":"

    The list of events you would like to subscribe and get notification for. Currently, Resilience Hub supports only Drift detected and Scheduled assessment failure events notification.

    " + }, "name":{ "shape":"EntityName", - "documentation":"

    The name for the application.

    " + "documentation":"

    Name of the application.

    " + }, + "permissionModel":{ + "shape":"PermissionModel", + "documentation":"

    Defines the roles and credentials that Resilience Hub would use while creating the application, importing its resources, and running an assessment.

    " }, "policyArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + "documentation":"

    Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " } } }, @@ -1754,7 +2007,7 @@ }, "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "clientToken":{ "shape":"ClientToken", @@ -1763,15 +2016,15 @@ }, "id":{ "shape":"String255", - "documentation":"

    The identifier of the Application Component.

    " + "documentation":"

    Identifier of the Application Component.

    " }, "name":{ "shape":"String255", - "documentation":"

    The name of the Application Component.

    " + "documentation":"

    Name of the Application Component.

    " }, "type":{ "shape":"String255", - "documentation":"

    The type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

    " + "documentation":"

    Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

    " } } }, @@ -1784,15 +2037,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appComponent":{ "shape":"AppComponent", - "documentation":"

    The list of Application Components that belong to this resource.

    " + "documentation":"

    List of Application Components that belong to this resource.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " } } }, @@ -1812,19 +2065,19 @@ }, "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appComponents":{ "shape":"AppComponentNameList", - "documentation":"

    The list of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

    " + "documentation":"

    List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

    " }, "awsAccountId":{ "shape":"CustomerId", - "documentation":"

    The Amazon Web Services account that owns the physical resource.

    " + "documentation":"

    Amazon Web Services account that owns the physical resource.

    " }, "awsRegion":{ "shape":"AwsRegion", - "documentation":"

    The Amazon Web Services region that owns the physical resource.

    " + "documentation":"

    Amazon Web Services region that owns the physical resource.

    " }, "clientToken":{ "shape":"ClientToken", @@ -1833,19 +2086,19 @@ }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"

    The logical identifier of the resource.

    " + "documentation":"

    Logical identifier of the resource.

    " }, "physicalResourceId":{ "shape":"String2048", - "documentation":"

    The physical identifier of the resource.

    " + "documentation":"

    Physical identifier of the resource.

    " }, "resourceName":{ "shape":"EntityName", - "documentation":"

    The name of the resource.

    " + "documentation":"

    Name of the resource.

    " }, "resourceType":{ "shape":"String255", - "documentation":"

    The type of resource.

    " + "documentation":"

    Type of resource.

    " } } }, @@ -1858,11 +2111,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " }, "physicalResource":{ "shape":"PhysicalResource", @@ -1879,7 +2132,7 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "bucketName":{ "shape":"EntityName", @@ -1908,7 +2161,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + "documentation":"

    Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " } } }, @@ -1952,7 +2205,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + "documentation":"

    Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " }, "tier":{ "shape":"ResiliencyPolicyTier", @@ -1993,7 +2246,7 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "clientToken":{ "shape":"ClientToken", @@ -2011,7 +2264,7 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "assessmentStatus":{ "shape":"AssessmentStatus", @@ -2025,7 +2278,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "clientToken":{ "shape":"ClientToken", @@ -2051,11 +2304,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appInputSource":{ "shape":"AppInputSource", - "documentation":"

    The name of the input source from where the application resource is imported from.

    " + "documentation":"

    Name of the input source from where the application resource is imported from.

    " } } }, @@ -2065,7 +2318,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "clientToken":{ "shape":"ClientToken", @@ -2084,7 +2337,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " } } }, @@ -2097,7 +2350,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "clientToken":{ "shape":"ClientToken", @@ -2106,7 +2359,7 @@ }, "id":{ "shape":"String255", - "documentation":"

    The identifier of the Application Component.

    " + "documentation":"

    Identifier of the Application Component.

    " } } }, @@ -2119,15 +2372,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appComponent":{ "shape":"AppComponent", - "documentation":"

    The list of Application Components that belong to this resource.

    " + "documentation":"

    List of Application Components that belong to this resource.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " } } }, @@ -2137,15 +2390,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "awsAccountId":{ "shape":"CustomerId", - "documentation":"

    The Amazon Web Services account that owns the physical resource.

    " + "documentation":"

    Amazon Web Services account that owns the physical resource.

    " }, "awsRegion":{ "shape":"AwsRegion", - "documentation":"

    The Amazon Web Services region that owns the physical resource.

    " + "documentation":"

    Amazon Web Services region that owns the physical resource.

    " }, "clientToken":{ "shape":"ClientToken", @@ -2154,15 +2407,15 @@ }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"

    The logical identifier of the resource.

    " + "documentation":"

    Logical identifier of the resource.

    " }, "physicalResourceId":{ "shape":"String2048", - "documentation":"

    The physical identifier of the resource.

    " + "documentation":"

    Physical identifier of the resource.

    " }, "resourceName":{ "shape":"EntityName", - "documentation":"

    The name of the resource.

    " + "documentation":"

    Name of the resource.

    " } } }, @@ -2175,11 +2428,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " }, "physicalResource":{ "shape":"PhysicalResource", @@ -2215,7 +2468,7 @@ }, "status":{ "shape":"RecommendationTemplateStatus", - "documentation":"

    The status of the action.

    " + "documentation":"

    Status of the action.

    " } } }, @@ -2230,7 +2483,7 @@ }, "policyArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " } } }, @@ -2240,7 +2493,7 @@ "members":{ "policyArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " } } }, @@ -2250,7 +2503,7 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " } } }, @@ -2270,7 +2523,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " } } }, @@ -2294,15 +2547,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " }, "id":{ "shape":"String255", - "documentation":"

    The identifier of the Application Component.

    " + "documentation":"

    Identifier of the Application Component.

    " } } }, @@ -2315,15 +2568,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appComponent":{ "shape":"AppComponent", - "documentation":"

    The list of Application Components that belong to this resource.

    " + "documentation":"

    List of Application Components that belong to this resource.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " } } }, @@ -2336,11 +2589,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " } } }, @@ -2353,31 +2606,31 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " }, "awsAccountId":{ "shape":"CustomerId", - "documentation":"

    The Amazon Web Services account that owns the physical resource.

    " + "documentation":"

    Amazon Web Services account that owns the physical resource.

    " }, "awsRegion":{ "shape":"AwsRegion", - "documentation":"

    The Amazon Web Services region that owns the physical resource.

    " + "documentation":"

    Amazon Web Services region that owns the physical resource.

    " }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"

    The logical identifier of the resource.

    " + "documentation":"

    Logical identifier of the resource.

    " }, "physicalResourceId":{ "shape":"String2048", - "documentation":"

    The physical identifier of the resource.

    " + "documentation":"

    Physical identifier of the resource.

    " }, "resourceName":{ "shape":"EntityName", - "documentation":"

    The name of the resource.

    " + "documentation":"

    Name of the resource.

    " } } }, @@ -2390,11 +2643,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " }, "physicalResource":{ "shape":"PhysicalResource", @@ -2411,7 +2664,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -2434,7 +2687,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -2450,7 +2703,7 @@ }, "status":{ "shape":"ResourceResolutionStatusType", - "documentation":"

    The status of the action.

    " + "documentation":"

    Status of the action.

    " } } }, @@ -2467,11 +2720,11 @@ }, "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " } } }, @@ -2484,7 +2737,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -2502,11 +2755,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appTemplateBody":{ "shape":"AppTemplateBody", - "documentation":"

    A JSON string that provides information about your application structure. To learn more about the appTemplateBody template, see the sample template provided in the Examples section.

    The appTemplateBody JSON string has the following structure:

    • resources

      The list of logical resources that must be included in the Resilience Hub application.

      Type: Array

      Don't add the resources that you want to exclude.

      Each resources array item includes the following fields:

      • logicalResourceId

        The logical identifier of the resource.

        Type: Object

        Each logicalResourceId object includes the following fields:

        • identifier

          The identifier of the resource.

          Type: String

        • logicalStackName

          The name of the CloudFormation stack this resource belongs to.

          Type: String

        • resourceGroupName

          The name of the resource group this resource belongs to.

          Type: String

        • terraformSourceName

          The name of the Terraform S3 state file this resource belongs to.

          Type: String

        • eksSourceName

          The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

          This parameter accepts values in \"eks-cluster/namespace\" format.

          Type: String

      • type

        The type of resource.

        Type: string

      • name

        The name of the resource.

        Type: String

      • additionalInfo

        Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

        Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

        Key: \"failover-regions\"

        Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    • appComponents

      The list of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

      Type: Array

      Each appComponents array item includes the following fields:

      • name

        The name of the Application Component.

        Type: String

      • type

        The type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

        Type: String

      • resourceNames

        The list of included resources that are assigned to the Application Component.

        Type: Array of strings

      • additionalInfo

        Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

        Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

        Key: \"failover-regions\"

        Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    • excludedResources

      The list of logical resource identifiers to be excluded from the application.

      Type: Array

      Don't add the resources that you want to include.

      Each excludedResources array item includes the following fields:

      • logicalResourceIds

        The logical identifier of the resource.

        Type: Object

        You can configure only one of the following fields:

        • logicalStackName

        • resourceGroupName

        • terraformSourceName

        • eksSourceName

        Each logicalResourceIds object includes the following fields:

        • identifier

          The identifier of the resource.

          Type: String

        • logicalStackName

          The name of the CloudFormation stack this resource belongs to.

          Type: String

        • resourceGroupName

          The name of the resource group this resource belongs to.

          Type: String

        • terraformSourceName

          The name of the Terraform S3 state file this resource belongs to.

          Type: String

        • eksSourceName

          The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

          This parameter accepts values in \"eks-cluster/namespace\" format.

          Type: String

    • version

      The Resilience Hub application version.

    • additionalInfo

      Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

      Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

      Key: \"failover-regions\"

      Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    " + "documentation":"

    A JSON string that provides information about your application structure. To learn more about the appTemplateBody template, see the sample template provided in the Examples section.

    The appTemplateBody JSON string has the following structure:

    • resources

      The list of logical resources that must be included in the Resilience Hub application.

      Type: Array

      Don't add the resources that you want to exclude.

      Each resources array item includes the following fields:

      • logicalResourceId

        Logical identifier of the resource.

        Type: Object

        Each logicalResourceId object includes the following fields:

        • identifier

          Identifier of the resource.

          Type: String

        • logicalStackName

          The name of the CloudFormation stack this resource belongs to.

          Type: String

        • resourceGroupName

          The name of the resource group this resource belongs to.

          Type: String

        • terraformSourceName

          The name of the Terraform S3 state file this resource belongs to.

          Type: String

        • eksSourceName

          Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

          This parameter accepts values in \"eks-cluster/namespace\" format.

          Type: String

      • type

        The type of resource.

        Type: string

      • name

        The name of the resource.

        Type: String

      • additionalInfo

        Additional configuration parameters for a Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

        Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

        Key: \"failover-regions\"

        Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    • appComponents

      List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

      Type: Array

      Each appComponents array item includes the following fields:

      • name

        Name of the Application Component.

        Type: String

      • type

        Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

        Type: String

      • resourceNames

        The list of included resources that are assigned to the Application Component.

        Type: Array of strings

      • additionalInfo

        Additional configuration parameters for a Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

        Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

        Key: \"failover-regions\"

        Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    • excludedResources

      The list of logical resource identifiers to be excluded from the application.

      Type: Array

      Don't add the resources that you want to include.

      Each excludedResources array item includes the following fields:

      • logicalResourceIds

        Logical identifier of the resource.

        Type: Object

        You can configure only one of the following fields:

        • logicalStackName

        • resourceGroupName

        • terraformSourceName

        • eksSourceName

        Each logicalResourceIds object includes the following fields:

        • identifier

          Identifier of the resource.

          Type: String

        • logicalStackName

          The name of the CloudFormation stack this resource belongs to.

          Type: String

        • resourceGroupName

          The name of the resource group this resource belongs to.

          Type: String

        • terraformSourceName

          The name of the Terraform S3 state file this resource belongs to.

          Type: String

        • eksSourceName

          Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

          This parameter accepts values in \"eks-cluster/namespace\" format.

          Type: String

    • version

      Resilience Hub application version.

    • additionalInfo

      Additional configuration parameters for a Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

      Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

      Key: \"failover-regions\"

      Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    " }, "appVersion":{ "shape":"EntityVersion", @@ -2520,7 +2773,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " } } }, @@ -2535,7 +2788,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -2547,7 +2800,7 @@ }, "status":{ "shape":"ResourceImportStatusType", - "documentation":"

    The status of the action.

    " + "documentation":"

    Status of the action.

    " }, "statusChangeTime":{ "shape":"TimeStamp", @@ -2561,7 +2814,7 @@ "members":{ "policyArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " } } }, @@ -2575,6 +2828,10 @@ } } }, + "DifferenceType":{ + "type":"string", + "enum":["NotEqual"] + }, "DisruptionCompliance":{ "type":"structure", "required":["complianceStatus"], @@ -2609,7 +2866,7 @@ }, "rpoReferenceId":{ "shape":"String500", - "documentation":"

    The RPO reference identifier.

    " + "documentation":"

    Reference identifier of the RPO.

    " }, "rtoDescription":{ "shape":"String500", @@ -2617,7 +2874,7 @@ }, "rtoReferenceId":{ "shape":"String500", - "documentation":"

    The RTO reference identifier.

    " + "documentation":"

    Reference identifier of the RTO.

    " } }, "documentation":"

    Defines the compliance against the resiliency policy for a disruption.

    " @@ -2647,6 +2904,18 @@ "min":1 }, "Double":{"type":"double"}, + "DriftStatus":{ + "type":"string", + "enum":[ + "NotChecked", + "NotDetected", + "Detected" + ] + }, + "DriftType":{ + "type":"string", + "enum":["ApplicationCompliance"] + }, "EksNamespace":{ "type":"string", "max":63, @@ -2666,7 +2935,7 @@ "members":{ "eksClusterArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Elastic Kubernetes Service cluster. The format for this ARN is: arn:aws:eks:region:account-id:cluster/cluster-name. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Amazon Elastic Kubernetes Service cluster. The format for this ARN is: arn:aws:eks:region:account-id:cluster/cluster-name. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "namespaces":{ "shape":"EksNamespaceList", @@ -2684,7 +2953,7 @@ "members":{ "eksClusterArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Elastic Kubernetes Service cluster. The format for this ARN is: arn:aws:eks:region:account-id:cluster/cluster-name. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Amazon Elastic Kubernetes Service cluster. The format for this ARN is: arn:aws:eks:region:account-id:cluster/cluster-name. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "namespace":{ "shape":"EksNamespace", @@ -2732,6 +3001,49 @@ "L4" ] }, + "EventSubscription":{ + "type":"structure", + "required":[ + "eventType", + "name" + ], + "members":{ + "eventType":{ + "shape":"EventType", + "documentation":"

    The type of event you would like to subscribe to and get notifications for. Currently, Resilience Hub supports notifications only for Drift detected (DriftDetected) and Scheduled assessment failure (ScheduledAssessmentFailure) events.

    " + }, + "name":{ + "shape":"String255", + "documentation":"

    Unique name to identify an event subscription.

    " + }, + "snsTopicArn":{ + "shape":"Arn", + "documentation":"

    Amazon Resource Name (ARN) of the Amazon Simple Notification Service topic. The format for this ARN is: arn:partition:sns:region:account:topic-name. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + } + }, + "documentation":"

    Indicates an event you would like to subscribe to and get notifications for. Currently, Resilience Hub supports notifications only for Drift detected and Scheduled assessment failure events.

    " + }, + "EventSubscriptionList":{ + "type":"list", + "member":{"shape":"EventSubscription"}, + "max":10, + "min":0 + }, + "EventType":{ + "type":"string", + "enum":[ + "ScheduledAssessmentFailure", + "DriftDetected" + ] + }, + "ExcludeRecommendationReason":{ + "type":"string", + "enum":[ + "AlreadyImplemented", + "NotRelevant", + "ComplexityOfImplementation" + ] + }, "FailurePolicy":{ "type":"structure", "required":[ @@ -2760,13 +3072,27 @@ "NoRecoveryPlan" ] }, + "IamRoleArn":{ + "type":"string", + "pattern":"^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):iam::[0-9]{12}:role/(([^/][!-~]+/){1,511})?[A-Za-z0-9_+=,.@-]{1,64}$" + }, + "IamRoleArnList":{ + "type":"list", + "member":{"shape":"IamRoleArn"}, + "max":10, + "min":0 + }, + "IamRoleName":{ + "type":"string", + "pattern":"^([^/]([!-~]+/){1,511})?[A-Za-z0-9_+=,.@-]{1,64}$" + }, "ImportResourcesToDraftAppVersionRequest":{ "type":"structure", "required":["appArn"], "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "eksSources":{ "shape":"EksSourceList", @@ -2796,7 +3122,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -2812,7 +3138,7 @@ }, "status":{ "shape":"ResourceImportStatusType", - "documentation":"

    The status of the action.

    " + "documentation":"

    Status of the action.

    " }, "terraformSources":{ "shape":"TerraformSourceList", @@ -2837,11 +3163,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " }, "nextToken":{ "shape":"NextToken", @@ -2859,7 +3185,39 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " + } + } + }, + "ListAppAssessmentComplianceDriftsRequest":{ + "type":"structure", + "required":["assessmentArn"], + "members":{ + "assessmentArn":{ + "shape":"Arn", + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    Indicates the maximum number of applications requested.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    Indicates the unique token number of the next application to be checked for compliance and regulatory requirements from the list of applications.

    " + } + } + }, + "ListAppAssessmentComplianceDriftsResponse":{ + "type":"structure", + "required":["complianceDrifts"], + "members":{ + "complianceDrifts":{ + "shape":"ComplianceDriftList", + "documentation":"

    Indicates compliance drifts (recovery time objective (RTO) and recovery point objective (RPO)) detected for an assessed entity.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    Token number of the next application to be checked for compliance and regulatory requirements from the list of applications.

    " } } }, @@ -2868,7 +3226,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    ", + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    ", "location":"querystring", "locationName":"appArn" }, @@ -2898,7 +3256,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", "location":"querystring", "locationName":"maxResults" }, @@ -2926,7 +3284,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " } } }, @@ -2936,11 +3294,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " }, "nextToken":{ "shape":"NextToken", @@ -2958,7 +3316,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " } } }, @@ -2968,11 +3326,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " }, "nextToken":{ "shape":"NextToken", @@ -2990,7 +3348,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " } } }, @@ -3003,11 +3361,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " }, "maxResults":{ "shape":"MaxResults", @@ -3029,7 +3387,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " } } }, @@ -3042,11 +3400,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The version of the Application Component.

    " + "documentation":"

    Version of the Application Component.

    " }, "maxResults":{ "shape":"MaxResults", @@ -3067,7 +3425,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appComponents":{ "shape":"AppComponentList", @@ -3075,11 +3433,11 @@ }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " } } }, @@ -3092,7 +3450,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -3100,7 +3458,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " }, "nextToken":{ "shape":"NextToken", @@ -3114,7 +3472,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " }, "resourceMappings":{ "shape":"ResourceMappingList", @@ -3131,7 +3489,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -3139,7 +3497,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " }, "nextToken":{ "shape":"NextToken", @@ -3160,7 +3518,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " }, "physicalResources":{ "shape":"PhysicalResourceList", @@ -3178,15 +3536,23 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + }, + "endTime":{ + "shape":"TimeStamp", + "documentation":"

    Upper limit of the time range to filter the application versions.

    " }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " }, "nextToken":{ "shape":"NextToken", "documentation":"

    Null, or the token from a previous call to get the next set of results.

    " + }, + "startTime":{ + "shape":"TimeStamp", + "documentation":"

    Lower limit of the time range to filter the application versions.

    " } } }, @@ -3200,7 +3566,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " } } }, @@ -3209,13 +3575,13 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    ", + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    ", "location":"querystring", "locationName":"appArn" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", "location":"querystring", "locationName":"maxResults" }, @@ -3243,7 +3609,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " } } }, @@ -3253,13 +3619,13 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    ", + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    ", "location":"querystring", "locationName":"assessmentArn" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", "location":"querystring", "locationName":"maxResults" }, @@ -3289,7 +3655,7 @@ }, "status":{ "shape":"RecommendationTemplateStatusList", - "documentation":"

    The status of the action.

    ", + "documentation":"

    Status of the action.

    ", "location":"querystring", "locationName":"status" } @@ -3300,7 +3666,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " }, "recommendationTemplates":{ "shape":"RecommendationTemplateList", @@ -3313,7 +3679,7 @@ "members":{ "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", "location":"querystring", "locationName":"maxResults" }, @@ -3337,7 +3703,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " }, "resiliencyPolicies":{ "shape":"ResiliencyPolicies", @@ -3351,11 +3717,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " }, "nextToken":{ "shape":"NextToken", @@ -3369,7 +3735,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " }, "sopRecommendations":{ "shape":"SopRecommendationList", @@ -3382,7 +3748,7 @@ "members":{ "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", "location":"querystring", "locationName":"maxResults" }, @@ -3400,7 +3766,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " }, "resiliencyPolicies":{ "shape":"ResiliencyPolicies", @@ -3425,7 +3791,7 @@ "members":{ "tags":{ "shape":"TagMap", - "documentation":"

    The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + "documentation":"

    Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " } } }, @@ -3435,11 +3801,11 @@ "members":{ "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " }, "nextToken":{ "shape":"NextToken", @@ -3453,7 +3819,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " }, "testRecommendations":{ "shape":"TestRecommendationList", @@ -3470,7 +3836,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -3478,7 +3844,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " + "documentation":"

    Maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

    " }, "nextToken":{ "shape":"NextToken", @@ -3499,7 +3865,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next set of results, or null if there are no more results.

    " + "documentation":"

    Token for the next set of results, or null if there are no more results.

    " }, "resolutionId":{ "shape":"String255", @@ -3517,11 +3883,11 @@ "members":{ "eksSourceName":{ "shape":"String255", - "documentation":"

    The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

    This parameter accepts values in \"eks-cluster/namespace\" format.

    " + "documentation":"

    Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

    This parameter accepts values in \"eks-cluster/namespace\" format.

    " }, "identifier":{ "shape":"String255", - "documentation":"

    The identifier of the resource.

    " + "documentation":"

    Identifier of the resource.

    " }, "logicalStackName":{ "shape":"String255", @@ -3538,6 +3904,10 @@ }, "documentation":"

    Defines a logical resource identifier.

    " }, + "LongOptional":{ + "type":"long", + "box":true + }, "MaxResults":{ "type":"integer", "box":true, @@ -3548,6 +3918,32 @@ "type":"string", "pattern":"^\\S{1,2000}$" }, + "PermissionModel":{ + "type":"structure", + "required":["type"], + "members":{ + "crossAccountRoleArns":{ + "shape":"IamRoleArnList", + "documentation":"

    Defines a list of role Amazon Resource Names (ARNs) to be used in other accounts. These ARNs are used for querying purposes while importing resources and assessing your application.

    • These ARNs are required only when your resources are in other accounts and you have a different role name in these accounts. Otherwise, the invoker role name will be used in the other accounts.

    • These roles must have a trust policy with iam:AssumeRole permission to the invoker role in the primary account.

    " + }, + "invokerRoleName":{ + "shape":"IamRoleName", + "documentation":"

    Existing Amazon Web Services IAM role name in the primary Amazon Web Services account that will be assumed by the Resilience Hub Service Principal to obtain read-only access to your application resources while running an assessment.

    You must have iam:passRole permission for this role while creating or updating the application.

    " + }, + "type":{ + "shape":"PermissionModelType", + "documentation":"

    Defines how Resilience Hub scans your resources. It can scan for the resources by using a pre-existing role in your Amazon Web Services account, or by using the credentials of the current IAM user.

    " + } + }, + "documentation":"

    Defines the roles and credentials that Resilience Hub would use while creating the application, importing its resources, and running an assessment.

    " + }, + "PermissionModelType":{ + "type":"string", + "enum":[ + "LegacyIAMUser", + "RoleBased" + ] + }, "PhysicalIdentifierType":{ "type":"string", "enum":[ @@ -3577,15 +3973,15 @@ }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"

    The logical identifier of the resource.

    " + "documentation":"

    Logical identifier of the resource.

    " }, "parentResourceName":{ "shape":"EntityName", - "documentation":"

    The name of the parent resource.

    " + "documentation":"

    Name of the parent resource.

    " }, "physicalResourceId":{ "shape":"PhysicalResourceId", - "documentation":"

    The physical identifier of the resource.

    " + "documentation":"

    Identifier of the physical resource.

    " }, "resourceName":{ "shape":"EntityName", @@ -3597,7 +3993,7 @@ }, "sourceType":{ "shape":"ResourceSourceType", - "documentation":"

    The type of input source.

    " + "documentation":"

    Type of input source.

    " } }, "documentation":"

    Defines a physical resource. A physical resource is a resource that exists in your account. It can be identified using an Amazon Resource Name (ARN) or a Resilience Hub-native identifier.

    " @@ -3619,11 +4015,11 @@ }, "identifier":{ "shape":"String255", - "documentation":"

    The identifier of the physical resource.

    " + "documentation":"

    Identifier of the physical resource.

    " }, "type":{ "shape":"PhysicalIdentifierType", - "documentation":"

    Specifies the type of physical resource identifier.

    Arn

    The resource identifier is an Amazon Resource Name (ARN).

    Native

    The resource identifier is a Resilience Hub-native identifier.

    " + "documentation":"

    Specifies the type of physical resource identifier.

    Arn

    The resource identifier is an Amazon Resource Name (ARN) and it can identify the following list of resources:

    • AWS::ECS::Service

    • AWS::EFS::FileSystem

    • AWS::ElasticLoadBalancingV2::LoadBalancer

    • AWS::Lambda::Function

    • AWS::SNS::Topic

    Native

    The resource identifier is a Resilience Hub-native identifier and it can identify the following list of resources:

    • AWS::ApiGateway::RestApi

    • AWS::ApiGatewayV2::Api

    • AWS::AutoScaling::AutoScalingGroup

    • AWS::DocDB::DBCluster

    • AWS::DocDB::DBGlobalCluster

    • AWS::DocDB::DBInstance

    • AWS::DynamoDB::GlobalTable

    • AWS::DynamoDB::Table

    • AWS::EC2::EC2Fleet

    • AWS::EC2::Instance

    • AWS::EC2::NatGateway

    • AWS::EC2::Volume

    • AWS::ElasticLoadBalancing::LoadBalancer

    • AWS::RDS::DBCluster

    • AWS::RDS::DBInstance

    • AWS::RDS::GlobalCluster

    • AWS::Route53::RecordSet

    • AWS::S3::Bucket

    • AWS::SQS::Queue

    " } }, "documentation":"

    Defines a physical resource identifier.

    " @@ -3638,7 +4034,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + }, + "versionName":{ + "shape":"EntityVersion", + "documentation":"

    Name of the application version.

    " } } }, @@ -3648,11 +4048,19 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", "documentation":"

    The version of the application.

    " + }, + "identifier":{ + "shape":"LongOptional", + "documentation":"

    Identifier of the application version.

    " + }, + "versionName":{ + "shape":"EntityVersion", + "documentation":"

    Name of the application version.

    " } } }, @@ -3665,11 +4073,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appTemplateBody":{ "shape":"AppTemplateBody", - "documentation":"

    A JSON string that provides information about your application structure. To learn more about the appTemplateBody template, see the sample template provided in the Examples section.

    The appTemplateBody JSON string has the following structure:

    • resources

      The list of logical resources that must be included in the Resilience Hub application.

      Type: Array

      Don't add the resources that you want to exclude.

      Each resources array item includes the following fields:

      • logicalResourceId

        The logical identifier of the resource.

        Type: Object

        Each logicalResourceId object includes the following fields:

        • identifier

          The identifier of the resource.

          Type: String

        • logicalStackName

          The name of the CloudFormation stack this resource belongs to.

          Type: String

        • resourceGroupName

          The name of the resource group this resource belongs to.

          Type: String

        • terraformSourceName

          The name of the Terraform S3 state file this resource belongs to.

          Type: String

        • eksSourceName

          The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

          This parameter accepts values in \"eks-cluster/namespace\" format.

          Type: String

      • type

        The type of resource.

        Type: string

      • name

        The name of the resource.

        Type: String

      • additionalInfo

        Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

        Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

        Key: \"failover-regions\"

        Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    • appComponents

      The list of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

      Type: Array

      Each appComponents array item includes the following fields:

      • name

        The name of the Application Component.

        Type: String

      • type

        The type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

        Type: String

      • resourceNames

        The list of included resources that are assigned to the Application Component.

        Type: Array of strings

      • additionalInfo

        Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

        Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

        Key: \"failover-regions\"

        Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    • excludedResources

      The list of logical resource identifiers to be excluded from the application.

      Type: Array

      Don't add the resources that you want to include.

      Each excludedResources array item includes the following fields:

      • logicalResourceIds

        The logical identifier of the resource.

        Type: Object

        You can configure only one of the following fields:

        • logicalStackName

        • resourceGroupName

        • terraformSourceName

        • eksSourceName

        Each logicalResourceIds object includes the following fields:

        • identifier

          The identifier of the resource.

          Type: String

        • logicalStackName

          The name of the CloudFormation stack this resource belongs to.

          Type: String

        • resourceGroupName

          The name of the resource group this resource belongs to.

          Type: String

        • terraformSourceName

          The name of the Terraform S3 state file this resource belongs to.

          Type: String

        • eksSourceName

          The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

          This parameter accepts values in \"eks-cluster/namespace\" format.

          Type: String

    • version

      The Resilience Hub application version.

    • additionalInfo

      Additional configuration parameters for an Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

      Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

      Key: \"failover-regions\"

      Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    " + "documentation":"

    A JSON string that provides information about your application structure. To learn more about the appTemplateBody template, see the sample template provided in the Examples section.

    The appTemplateBody JSON string has the following structure:

    • resources

      The list of logical resources that must be included in the Resilience Hub application.

      Type: Array

      Don't add the resources that you want to exclude.

      Each resources array item includes the following fields:

      • logicalResourceId

        Logical identifier of the resource.

        Type: Object

        Each logicalResourceId object includes the following fields:

        • identifier

          Identifier of the resource.

          Type: String

        • logicalStackName

          The name of the CloudFormation stack this resource belongs to.

          Type: String

        • resourceGroupName

          The name of the resource group this resource belongs to.

          Type: String

        • terraformSourceName

          The name of the Terraform S3 state file this resource belongs to.

          Type: String

        • eksSourceName

          Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

          This parameter accepts values in \"eks-cluster/namespace\" format.

          Type: String

      • type

        The type of resource.

        Type: string

      • name

        The name of the resource.

        Type: String

      • additionalInfo

        Additional configuration parameters for a Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

        Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

        Key: \"failover-regions\"

        Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    • appComponents

      List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

      Type: Array

      Each appComponents array item includes the following fields:

      • name

        Name of the Application Component.

        Type: String

      • type

        Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

        Type: String

      • resourceNames

        The list of included resources that are assigned to the Application Component.

        Type: Array of strings

      • additionalInfo

        Additional configuration parameters for a Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

        Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

        Key: \"failover-regions\"

        Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    • excludedResources

      The list of logical resource identifiers to be excluded from the application.

      Type: Array

      Don't add the resources that you want to include.

      Each excludedResources array item includes the following fields:

      • logicalResourceIds

        Logical identifier of the resource.

        Type: Object

        You can configure only one of the following fields:

        • logicalStackName

        • resourceGroupName

        • terraformSourceName

        • eksSourceName

        Each logicalResourceIds object includes the following fields:

        • identifier

          Identifier of the resource.

          Type: String

        • logicalStackName

          The name of the CloudFormation stack this resource belongs to.

          Type: String

        • resourceGroupName

          The name of the resource group this resource belongs to.

          Type: String

        • terraformSourceName

          The name of the Terraform S3 state file this resource belongs to.

          Type: String

        • eksSourceName

          Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

          This parameter accepts values in \"eks-cluster/namespace\" format.

          Type: String

    • version

      Resilience Hub application version.

    • additionalInfo

      Additional configuration parameters for a Resilience Hub application. If you want to implement additionalInfo through the Resilience Hub console rather than using an API call, see Configure the application configuration parameters.

      Currently, this parameter accepts a key-value mapping (in a string format) of only one failover region and one associated account.

      Key: \"failover-regions\"

      Value: \"[{\"region\":\"<REGION>\", \"accounts\":[{\"id\":\"<ACCOUNT_ID>\"}]}]\"

    " } } }, @@ -3678,7 +4086,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -3739,13 +4147,21 @@ "shape":"BooleanOptional", "documentation":"

    Specifies if the recommendation has already been implemented.

    " }, + "excludeReason":{ + "shape":"ExcludeRecommendationReason", + "documentation":"

    Indicates the reason for excluding an operational recommendation.

    " + }, + "excluded":{ + "shape":"BooleanOptional", + "documentation":"

    Indicates if an operational recommendation item is excluded.

    " + }, "resourceId":{ "shape":"String500", - "documentation":"

    The resource identifier.

    " + "documentation":"

    Identifier of the resource.

    " }, "targetAccountId":{ "shape":"CustomerId", - "documentation":"

    The target account identifier.

    " + "documentation":"

    Identifier of the target account.

    " }, "targetRegion":{ "shape":"AwsRegion", @@ -3771,11 +4187,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "assessmentArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the assessment. The format for this ARN is: arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "endTime":{ "shape":"TimeStamp", @@ -3783,15 +4199,15 @@ }, "format":{ "shape":"TemplateFormat", - "documentation":"

    The format of the recommendation template.

    CfnJson

    The template is CloudFormation JSON.

    CfnYaml

    The template is CloudFormation YAML.

    " + "documentation":"

    Format of the recommendation template.

    CfnJson

    The template is CloudFormation JSON.

    CfnYaml

    The template is CloudFormation YAML.

    " }, "message":{ "shape":"String500", - "documentation":"

    The message for the recommendation template.

    " + "documentation":"

    Message for the recommendation template.

    " }, "name":{ "shape":"EntityName", - "documentation":"

    The name for the recommendation template.

    " + "documentation":"

    Name for the recommendation template.

    " }, "needsReplacements":{ "shape":"BooleanOptional", @@ -3803,7 +4219,7 @@ }, "recommendationTemplateArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) for the recommendation template.

    " + "documentation":"

    Amazon Resource Name (ARN) for the recommendation template.

    " }, "recommendationTypes":{ "shape":"RenderRecommendationTypeList", @@ -3815,11 +4231,11 @@ }, "status":{ "shape":"RecommendationTemplateStatus", - "documentation":"

    The status of the action.

    " + "documentation":"

    Status of the action.

    " }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + "documentation":"

    Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " }, "templatesLocation":{ "shape":"S3Location", @@ -3853,7 +4269,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appRegistryAppNames":{ "shape":"EntityNameList", @@ -3886,7 +4302,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -3933,7 +4349,7 @@ }, "policyArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "policyDescription":{ "shape":"EntityDescription", @@ -3945,7 +4361,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + "documentation":"

    Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " }, "tier":{ "shape":"ResiliencyPolicyTier", @@ -3961,7 +4377,8 @@ "Critical", "Important", "CoreServices", - "NonCritical" + "NonCritical", + "NotApplicable" ] }, "ResiliencyScore":{ @@ -3991,7 +4408,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -4010,7 +4427,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -4022,7 +4439,7 @@ }, "status":{ "shape":"ResourceResolutionStatusType", - "documentation":"

    The status of the action.

    " + "documentation":"

    Status of the action.

    " } } }, @@ -4031,11 +4448,11 @@ "members":{ "logicalResourceId":{ "shape":"String255", - "documentation":"

    This is the identifier of the resource.

    " + "documentation":"

    Identifier of the logical resource.

    " }, "physicalResourceId":{ "shape":"String255", - "documentation":"

    This is the identifier of the physical resource.

    " + "documentation":"

    Identifier of the physical resource.

    " }, "reason":{ "shape":"ErrorMessage", @@ -4095,7 +4512,7 @@ }, "eksSourceName":{ "shape":"String255", - "documentation":"

    The name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

    This parameter accepts values in \"eks-cluster/namespace\" format.

    " + "documentation":"

    Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

    This parameter accepts values in \"eks-cluster/namespace\" format.

    " }, "logicalStackName":{ "shape":"String255", @@ -4107,15 +4524,15 @@ }, "physicalResourceId":{ "shape":"PhysicalResourceId", - "documentation":"

    The identifier of this resource.

    " + "documentation":"

    Identifier of the physical resource.

    " }, "resourceGroupName":{ "shape":"EntityName", - "documentation":"

    The name of the resource group this resource is mapped to.

    " + "documentation":"

    Name of the resource group that the resource is mapped to.

    " }, "resourceName":{ "shape":"EntityName", - "documentation":"

    The name of the resource this resource is mapped to.

    " + "documentation":"

    Name of the resource that the resource is mapped to.

    " }, "terraformSourceName":{ "shape":"String255", @@ -4229,11 +4646,11 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"

    The name of the Application Component.

    " + "documentation":"

    Name of the Application Component.

    " }, "description":{ "shape":"String500", - "documentation":"

    The description of the SOP recommendation.

    " + "documentation":"

    Description of the SOP recommendation.

    " }, "items":{ "shape":"RecommendationItemList", @@ -4241,11 +4658,11 @@ }, "name":{ "shape":"DocumentName", - "documentation":"

    The name of the SOP recommendation.

    " + "documentation":"

    Name of the SOP recommendation.

    " }, "prerequisite":{ "shape":"String500", - "documentation":"

    The prerequisite for the SOP recommendation.

    " + "documentation":"

    Prerequisite for the SOP recommendation.

    " }, "recommendationId":{ "shape":"Uuid", @@ -4253,7 +4670,7 @@ }, "referenceId":{ "shape":"SpecReferenceId", - "documentation":"

    The reference identifier for the SOP recommendation.

    " + "documentation":"

    Reference identifier for the SOP recommendation.

    " }, "serviceType":{ "shape":"SopServiceType", @@ -4285,7 +4702,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", @@ -4302,7 +4719,7 @@ }, "tags":{ "shape":"TagMap", - "documentation":"

    The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " + "documentation":"

    Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key/value pair.

    " } } }, @@ -4378,7 +4795,7 @@ "members":{ "resourceArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "documentation":"

    Amazon Resource Name (ARN) of the resource.

    ", "location":"uri", "locationName":"resourceArn" }, @@ -4427,7 +4844,7 @@ "members":{ "appComponentName":{ "shape":"EntityId", - "documentation":"

    The name of the Application Component.

    " + "documentation":"

    Name of the Application Component.

    " }, "dependsOnAlarms":{ "shape":"AlarmReferenceIdList", @@ -4435,11 +4852,11 @@ }, "description":{ "shape":"String500", - "documentation":"

    The description for the test recommendation.

    " + "documentation":"

    Description for the test recommendation.

    " }, "intent":{ "shape":"EntityDescription", - "documentation":"

    The intent of the test recommendation.

    " + "documentation":"

    Intent of the test recommendation.

    " }, "items":{ "shape":"RecommendationItemList", @@ -4447,11 +4864,11 @@ }, "name":{ "shape":"DocumentName", - "documentation":"

    The name of the test recommendation.

    " + "documentation":"

    Name of the test recommendation.

    " }, "prerequisite":{ "shape":"String500", - "documentation":"

    The prerequisite of the test recommendation.

    " + "documentation":"

    Prerequisite of the test recommendation.

    " }, "recommendationId":{ "shape":"Uuid", @@ -4459,15 +4876,15 @@ }, "referenceId":{ "shape":"SpecReferenceId", - "documentation":"

    The reference identifier for the test recommendation.

    " + "documentation":"

    Reference identifier for the test recommendation.

    " }, "risk":{ "shape":"TestRisk", - "documentation":"

    The level of risk for this test recommendation.

    " + "documentation":"

    Level of risk for this test recommendation.

    " }, "type":{ "shape":"TestType", - "documentation":"

    The type of test recommendation.

    " + "documentation":"

    Type of test recommendation.

    " } }, "documentation":"

    Defines a test recommendation.

    " @@ -4520,11 +4937,11 @@ "members":{ "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"

    The logical resource identifier for the unsupported resource.

    " + "documentation":"

    Logical resource identifier for the unsupported resource.

    " }, "physicalResourceId":{ "shape":"PhysicalResourceId", - "documentation":"

    The physical resource identifier for the unsupported resource.

    " + "documentation":"

    Physical resource identifier for the unsupported resource.

    " }, "resourceType":{ "shape":"String255", @@ -4550,7 +4967,7 @@ "members":{ "resourceArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resource.

    ", + "documentation":"

    Amazon Resource Name (ARN) of the resource.

    ", "location":"uri", "locationName":"resourceArn" }, @@ -4573,7 +4990,7 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "assessmentSchedule":{ "shape":"AppAssessmentScheduleType", @@ -4587,9 +5004,17 @@ "shape":"EntityDescription", "documentation":"

    The optional description for an app.

    " }, + "eventSubscriptions":{ + "shape":"EventSubscriptionList", + "documentation":"

    The list of events you would like to subscribe to and get notifications for. Currently, Resilience Hub supports notifications only for Drift detected and Scheduled assessment failure events.

    " + }, + "permissionModel":{ + "shape":"PermissionModel", + "documentation":"

    Defines the roles and credentials that Resilience Hub would use while creating an application, importing its resources, and running an assessment.

    " + }, "policyArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " } } }, @@ -4616,19 +5041,19 @@ }, "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "id":{ "shape":"String255", - "documentation":"

    The identifier of the Application Component.

    " + "documentation":"

    Identifier of the Application Component.

    " }, "name":{ "shape":"String255", - "documentation":"

    The name of the Application Component.

    " + "documentation":"

    Name of the Application Component.

    " }, "type":{ "shape":"String255", - "documentation":"

    The type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

    " + "documentation":"

    Type of Application Component. For more information about the types of Application Component, see Grouping resources in an AppComponent.

    " } } }, @@ -4641,15 +5066,15 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appComponent":{ "shape":"AppComponent", - "documentation":"

    The list of Application Components that belong to this resource.

    " + "documentation":"

    List of Application Components that belong to this resource.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " } } }, @@ -4663,7 +5088,7 @@ }, "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " } } }, @@ -4677,19 +5102,19 @@ }, "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appComponents":{ "shape":"AppComponentNameList", - "documentation":"

    The list of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

    " + "documentation":"

    List of Application Components that this resource belongs to. If an Application Component is not part of the Resilience Hub application, it will be added.

    " }, "awsAccountId":{ "shape":"CustomerId", - "documentation":"

    The Amazon Web Services account that owns the physical resource.

    " + "documentation":"

    Amazon Web Services account that owns the physical resource.

    " }, "awsRegion":{ "shape":"AwsRegion", - "documentation":"

    The Amazon Web Services region that owns the physical resource.

    " + "documentation":"

    Amazon Web Services region that owns the physical resource.

    " }, "excluded":{ "shape":"BooleanOptional", @@ -4697,19 +5122,19 @@ }, "logicalResourceId":{ "shape":"LogicalResourceId", - "documentation":"

    The logical identifier of the resource.

    " + "documentation":"

    Logical identifier of the resource.

    " }, "physicalResourceId":{ "shape":"String2048", - "documentation":"

    The physical identifier of the resource.

    " + "documentation":"

    Physical identifier of the resource.

    " }, "resourceName":{ "shape":"EntityName", - "documentation":"

    The name of the resource.

    " + "documentation":"

    Name of the resource.

    " }, "resourceType":{ "shape":"String255", - "documentation":"

    The type of resource.

    " + "documentation":"

    Type of resource.

    " } } }, @@ -4722,11 +5147,11 @@ "members":{ "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " }, "physicalResource":{ "shape":"PhysicalResource", @@ -4747,14 +5172,70 @@ }, "appArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "appVersion":{ "shape":"EntityVersion", - "documentation":"

    The Resilience Hub application version.

    " + "documentation":"

    Resilience Hub application version.

    " } } }, + "UpdateRecommendationStatusItem":{ + "type":"structure", + "members":{ + "resourceId":{ + "shape":"String500", + "documentation":"

    Resource identifier of the operational recommendation item.

    " + }, + "targetAccountId":{ + "shape":"CustomerId", + "documentation":"

    Identifier of the target Amazon Web Services account.

    " + }, + "targetRegion":{ + "shape":"AwsRegion", + "documentation":"

    Identifier of the target Amazon Web Services Region.

    " + } + }, + "documentation":"

    Defines the operational recommendation item that needs a status update.

    " + }, + "UpdateRecommendationStatusRequestEntries":{ + "type":"list", + "member":{"shape":"UpdateRecommendationStatusRequestEntry"}, + "max":50, + "min":1 + }, + "UpdateRecommendationStatusRequestEntry":{ + "type":"structure", + "required":[ + "entryId", + "excluded", + "item", + "referenceId" + ], + "members":{ + "entryId":{ + "shape":"String255", + "documentation":"

    An identifier for an entry in this batch that is used to communicate the result.

    The entryIds of a batch request need to be unique within a request.

    " + }, + "excludeReason":{ + "shape":"ExcludeRecommendationReason", + "documentation":"

    Indicates the reason for excluding an operational recommendation.

    " + }, + "excluded":{ + "shape":"BooleanOptional", + "documentation":"

    Indicates if the operational recommendation needs to be excluded. If set to True, the operational recommendation will be excluded.

    " + }, + "item":{ + "shape":"UpdateRecommendationStatusItem", + "documentation":"

    The operational recommendation item.

    " + }, + "referenceId":{ + "shape":"SpecReferenceId", + "documentation":"

    Reference identifier of the operational recommendation item.

    " + } + }, + "documentation":"

    Defines the operational recommendation item that is to be included or excluded.

    " + }, "UpdateResiliencyPolicyRequest":{ "type":"structure", "required":["policyArn"], @@ -4769,7 +5250,7 @@ }, "policyArn":{ "shape":"Arn", - "documentation":"

    The Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " + "documentation":"

    Amazon Resource Name (ARN) of the resiliency policy. The format for this ARN is: arn:partition:resiliencehub:region:account:resiliency-policy/policy-id. For more information about ARNs, see Amazon Resource Names (ARNs) in the AWS General Reference guide.

    " }, "policyDescription":{ "shape":"EntityDescription", diff --git a/services/resourceexplorer2/pom.xml b/services/resourceexplorer2/pom.xml index 2ca8df85396..22587e02f75 100644 --- a/services/resourceexplorer2/pom.xml +++ b/services/resourceexplorer2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT resourceexplorer2 AWS Java SDK :: Services :: Resource Explorer 2 diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index d5da3390b8a..651a5d70f92 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index d3852054544..d411662b193 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index f9a780221fc..838cc84847e 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/rolesanywhere/pom.xml b/services/rolesanywhere/pom.xml index e7e5181176b..2dc9be0a4a6 100644 --- a/services/rolesanywhere/pom.xml +++ b/services/rolesanywhere/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT rolesanywhere AWS Java SDK :: Services :: Roles Anywhere diff --git a/services/route53/pom.xml b/services/route53/pom.xml index 24337a454f6..196f1971c38 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/route53/src/main/resources/codegen-resources/endpoint-rule-set.json index e956969175b..eef1699a130 100644 --- a/services/route53/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/route53/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,64 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + 
} + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "isSet", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws" + "ref": "Region" + } ] } ], @@ -128,22 +111,13 @@ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -151,1276 +125,581 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.api.aws", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + true + ] + }, { - "ref": 
"UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53-fips.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws-cn" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://route53-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + false + ] + }, { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "cn-northwest-1" + } + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false ] } ], - "type": "tree", - "rules": [ + 
"endpoint": { + "url": "https://route53.us-gov.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "endpoint": { - "url": "https://route-53.{Region}.api.aws", - "properties": { - "authSchemes": [ + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } + "ref": "PartitionResult" + }, + "name" ] }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" + "aws-us-gov" + ] }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53.us-gov.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { 
"fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws-iso" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://route53.c2s.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-iso-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws-iso-b" ] - } - ], - "type": "tree", - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" - } - ] + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "headers": {} - }, - "type": "endpoint" + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } - ] + ], + "endpoint": { + "url": "https://route53.sc2s.sgov.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-isob-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" }, { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + 
"ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.amazonaws.com.cn", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] } ] }, - "headers": {} - }, - "type": "endpoint" + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://route-53.{Region}.api.amazonwebservices.com.cn", - "properties": { - "authSchemes": [ + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] } ] - }, - "headers": {} - }, - 
"type": "endpoint" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://route53-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-us-gov" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.api.aws", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] } ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + } + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "ref": "PartitionResult" - }, - "supportsDualStack" + "conditions": [], + "endpoint": { + "url": "https://route53.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } ] } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://route-53.{Region}.api.aws", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { 
"conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-iso" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://route-53-fips.{Region}.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-iso-east-1", - "signingName": "route53" - } - ] - }, + "url": "https://route53.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-iso-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-iso-b" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": 
"UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route-53-fips.{Region}.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-isob-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-isob-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - 
}, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://route53-fips.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-us-gov-global" - ] - } - ], - "endpoint": { - "url": "https://route53.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://route53-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition 
does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-global" - ] - } - ], - "endpoint": { - "url": "https://route53.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-cn-global" - ] - } - ], - "endpoint": { - "url": "https://route53.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "cn-northwest-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-us-gov-global" - ] - } - ], - "endpoint": { - "url": "https://route53.us-gov.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-iso-global" - ] - } - ], - "endpoint": { - "url": "https://route53.c2s.ic.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-iso-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-iso-b-global" - ] - } - ], - "endpoint": { - "url": "https://route53.sc2s.sgov.gov", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-isob-east-1", - "signingName": "route53" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": 
"https://route53.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/route53/src/main/resources/codegen-resources/endpoint-tests.json b/services/route53/src/main/resources/codegen-resources/endpoint-tests.json index b5c63bf822e..fe838c14551 100644 --- a/services/route53/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/route53/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,5 +1,119 @@ { "testCases": [ + { + "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://route53.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region aws-global with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://route53-fips.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://route53-fips.amazonaws.com" + } + }, + "params": { + "Region": 
"us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://route53.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", "expect": { @@ -17,13 +131,52 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-cn-global", "UseFIPS": false, - "Region": "aws-cn-global" + "UseDualStack": false } }, { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { "properties": { @@ -31,17 +184,131 @@ { "name": "sigv4", "signingName": "route53", - "signingRegion": "us-east-1" + "signingRegion": "cn-northwest-1" } ] }, - "url": "https://route53.amazonaws.com" + "url": "https://route53.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region aws-us-gov-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53.us-gov.amazonaws.com" + } + }, + "params": { + "Region": "aws-us-gov-global", "UseFIPS": false, - "Region": "aws-global" + "UseDualStack": false + } + }, + { + "documentation": "For region aws-us-gov-global with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53.us-gov.amazonaws.com" + } + }, + "params": { + "Region": "aws-us-gov-global", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53.us-gov.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region 
us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://route53.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://route53.us-gov.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,9 +328,66 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-iso-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "aws-iso-global" + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "route53", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://route53.c2s.ic.gov" + } + }, + 
"params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -83,13 +407,48 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-iso-b-global", "UseFIPS": false, - "Region": "aws-iso-b-global" + "UseDualStack": false } }, { - "documentation": "For region aws-us-gov-global with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://route53-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { @@ -97,30 +456,43 @@ { "name": "sigv4", "signingName": "route53", - "signingRegion": "us-gov-west-1" + "signingRegion": "us-isob-east-1" } ] }, - "url": "https://route53.us-gov.amazonaws.com" + "url": "https://route53.sc2s.sgov.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "aws-us-gov-global" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, 
"params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -130,9 +502,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -142,11 +514,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/route53/src/main/resources/codegen-resources/service-2.json b/services/route53/src/main/resources/codegen-resources/service-2.json index e7cdfb886d3..f42dd5d8adc 100644 --- a/services/route53/src/main/resources/codegen-resources/service-2.json +++ b/services/route53/src/main/resources/codegen-resources/service-2.json @@ -95,7 +95,7 @@ {"shape":"InvalidInput"}, {"shape":"PriorRequestNotComplete"} ], - "documentation":"

    Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.

    Deleting Resource Record Sets

    To delete a resource record set, you must specify all the same values that you specified when you created it.

    Change Batches and Transactional Changes

    The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.

    For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.

    If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.

    Traffic Flow

    To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.

    Create, Delete, and Upsert

    Use ChangeResourceRecordsSetsRequest to perform the following actions:

    • CREATE: Creates a resource record set that has the specified values.

    • DELETE: Deletes an existing resource record set that has the specified values.

    • UPSERT: If a resource set exists Route 53 updates it with the values in the request.

    Syntaxes for Creating, Updating, and Deleting Resource Record Sets

    The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.

    For an example for each type of resource record set, see \"Examples.\"

    Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.

    Change Propagation to Route 53 DNS Servers

    When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers within 60 seconds. For more information, see GetChange.

    Limits on ChangeResourceRecordSets Requests

    For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.

    " + "documentation":"

    Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.

    Deleting Resource Record Sets

    To delete a resource record set, you must specify all the same values that you specified when you created it.

    Change Batches and Transactional Changes

    The request body must include a document with a ChangeResourceRecordSetsRequest element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.

    For example, suppose a change batch request contains two changes: it deletes the CNAME resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE or the CREATE action fails, then the request is canceled, and the original CNAME record continues to exist.

    If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch error.

    Traffic Flow

    To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.

    Create, Delete, and Upsert

    Use ChangeResourceRecordSetsRequest to perform the following actions:

    • CREATE: Creates a resource record set that has the specified values.

    • DELETE: Deletes an existing resource record set that has the specified values.

    • UPSERT: If a resource record set exists, Route 53 updates it with the values in the request.

    Syntaxes for Creating, Updating, and Deleting Resource Record Sets

    The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.

    For an example for each type of resource record set, see \"Examples.\"

    Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets.

    Change Propagation to Route 53 DNS Servers

    When you submit a ChangeResourceRecordSets request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers managing the hosted zone. While your changes are propagating, GetChange returns a status of PENDING. When propagation is complete, GetChange returns a status of INSYNC. Changes generally propagate to all Route 53 name servers managing the hosted zone within 60 seconds. For more information, see GetChange.

    Limits on ChangeResourceRecordSets Requests

    For information about the limits on a ChangeResourceRecordSets request, see Limits in the Amazon Route 53 Developer Guide.

    " }, "ChangeTagsForResource":{ "name":"ChangeTagsForResource", @@ -600,7 +600,7 @@ {"shape":"NoSuchChange"}, {"shape":"InvalidInput"} ], - "documentation":"

    Returns the current status of a change batch request. The status is one of the following values:

    • PENDING indicates that the changes in this request have not propagated to all Amazon Route 53 DNS servers. This is the initial status of all change batch requests.

    • INSYNC indicates that the changes have propagated to all Route 53 DNS servers.

    " + "documentation":"

    Returns the current status of a change batch request. The status is one of the following values:

    • PENDING indicates that the changes in this request have not propagated to all Amazon Route 53 DNS servers managing the hosted zone. This is the initial status of all change batch requests.

    • INSYNC indicates that the changes have propagated to all Route 53 DNS servers managing the hosted zone.

    " }, "GetCheckerIpRanges":{ "name":"GetCheckerIpRanges", @@ -1107,7 +1107,7 @@ {"shape":"NoSuchHostedZone"}, {"shape":"InvalidInput"} ], - "documentation":"

    Gets the value that Amazon Route 53 returns in response to a DNS request for a specified record name and type. You can optionally specify the IP address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask.

    This call only supports querying public hosted zones.

    " + "documentation":"

    Gets the value that Amazon Route 53 returns in response to a DNS request for a specified record name and type. You can optionally specify the IP address of a DNS resolver, an EDNS0 client subnet IP address, and a subnet mask.

    This call only supports querying public hosted zones.

    TestDnsAnswer returns information similar to what you would expect from the answer section of the dig command. Therefore, if you query for the name servers of a subdomain that point to the parent name servers, those will not be returned.

    " }, "UpdateHealthCheck":{ "name":"UpdateHealthCheck", @@ -1781,7 +1781,8 @@ "us-iso-east-1", "us-iso-west-1", "us-isob-east-1", - "ap-southeast-4" + "ap-southeast-4", + "il-central-1" ], "max":64, "min":1 @@ -1958,7 +1959,7 @@ }, "DelegationSetId":{ "shape":"ResourceId", - "documentation":"

    If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set when you created it. For more information about reusable delegation sets, see CreateReusableDelegationSet.

    " + "documentation":"

    If you want to associate a reusable delegation set with this hosted zone, the ID that Amazon Route 53 assigned to the reusable delegation set when you created it. For more information about reusable delegation sets, see CreateReusableDelegationSet.

    If you are using a reusable delegation set to create a public hosted zone for a subdomain, make sure that the parent hosted zone doesn't use one or more of the same name servers. If you have overlapping nameservers, the operation will cause a ConflictingDomainsExist error.

    " } }, "documentation":"

    A complex type that contains information about the request to create a public or private hosted zone.

    " @@ -5469,7 +5470,8 @@ "af-south-1", "eu-south-1", "eu-south-2", - "ap-southeast-4" + "ap-southeast-4", + "il-central-1" ], "max":64, "min":1 @@ -6344,7 +6346,8 @@ "af-south-1", "eu-south-1", "eu-south-2", - "ap-southeast-4" + "ap-southeast-4", + "il-central-1" ], "max":64, "min":1 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index 2c24af10edb..81cf9e46861 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53recoverycluster/pom.xml b/services/route53recoverycluster/pom.xml index cd06448e077..0dd3739e76f 100644 --- a/services/route53recoverycluster/pom.xml +++ b/services/route53recoverycluster/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT route53recoverycluster AWS Java SDK :: Services :: Route53 Recovery Cluster diff --git a/services/route53recoverycontrolconfig/pom.xml b/services/route53recoverycontrolconfig/pom.xml index eb93cbac853..9e7d93e6b95 100644 --- a/services/route53recoverycontrolconfig/pom.xml +++ b/services/route53recoverycontrolconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT route53recoverycontrolconfig AWS Java SDK :: Services :: Route53 Recovery Control Config diff --git a/services/route53recoveryreadiness/pom.xml b/services/route53recoveryreadiness/pom.xml index b8167512a7d..f9ec52f370c 100644 --- a/services/route53recoveryreadiness/pom.xml +++ b/services/route53recoveryreadiness/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT route53recoveryreadiness AWS Java SDK :: Services :: Route53 Recovery Readiness diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index e4bd57229fa..a628b91cb9b 100644 --- 
a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/rum/pom.xml b/services/rum/pom.xml index 65009c2b341..1df461cb60a 100644 --- a/services/rum/pom.xml +++ b/services/rum/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT rum AWS Java SDK :: Services :: RUM diff --git a/services/s3/pom.xml b/services/s3/pom.xml index e03dd358d58..f708f8c4238 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3IntegrationTestBase.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3IntegrationTestBase.java index 63dcf2ddc88..03cf42afe5d 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3IntegrationTestBase.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3IntegrationTestBase.java @@ -117,7 +117,7 @@ protected static void deleteBucketAndAllContents(String bucketName) { S3TestUtils.deleteBucketAndAllContents(s3, bucketName); } - private static class UserAgentVerifyingExecutionInterceptor implements ExecutionInterceptor { + protected static class UserAgentVerifyingExecutionInterceptor implements ExecutionInterceptor { private final String clientName; private final ClientType clientType; diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrossRegionCrtIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrossRegionCrtIntegrationTest.java index 953c6e4b4f4..72c6fce095c 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrossRegionCrtIntegrationTest.java +++ 
b/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrossRegionCrtIntegrationTest.java @@ -17,7 +17,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static software.amazon.awssdk.services.s3.crt.S3CrtClientCopyIntegrationTest.randomBytes; +import static software.amazon.awssdk.services.s3.multipart.S3ClientMultiPartCopyIntegrationTest.randomBytes; import static software.amazon.awssdk.services.s3.utils.ChecksumUtils.computeCheckSum; import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrtClientPutObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrtClientPutObjectIntegrationTest.java index c6761f66e17..f81e700395e 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrtClientPutObjectIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrtClientPutObjectIntegrationTest.java @@ -47,7 +47,7 @@ public class S3CrtClientPutObjectIntegrationTest extends S3IntegrationTestBase { private static final String TEST_BUCKET = temporaryBucketName(S3CrtClientPutObjectIntegrationTest.class); private static final String TEST_KEY = "8mib_file.dat"; - private static final int OBJ_SIZE = 8 * 1024 * 1024; + private static final int OBJ_SIZE = 10 * 1024 * 1024; private static RandomTempFile testFile; private static S3AsyncClient s3Crt; diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrtClientCopyIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3ClientMultiPartCopyIntegrationTest.java similarity index 70% rename from services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrtClientCopyIntegrationTest.java rename to 
services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3ClientMultiPartCopyIntegrationTest.java index f4d2b34c1cd..fc4f31b76b1 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/crt/S3CrtClientCopyIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3ClientMultiPartCopyIntegrationTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.s3.crt; +package software.amazon.awssdk.services.s3.multipart; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Fail.fail; @@ -24,13 +24,17 @@ import java.nio.ByteBuffer; import java.security.SecureRandom; import java.util.Base64; -import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; import javax.crypto.KeyGenerator; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.core.ClientType; import software.amazon.awssdk.core.ResponseBytes; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.sync.ResponseTransformer; @@ -42,8 +46,9 @@ import software.amazon.awssdk.services.s3.model.MetadataDirective; import software.amazon.awssdk.utils.Md5Utils; -public class S3CrtClientCopyIntegrationTest extends S3IntegrationTestBase { - private static final String BUCKET = temporaryBucketName(S3CrtClientCopyIntegrationTest.class); +@Timeout(value = 3, unit = TimeUnit.MINUTES) +public class S3ClientMultiPartCopyIntegrationTest extends S3IntegrationTestBase { + private static final String BUCKET = temporaryBucketName(S3ClientMultiPartCopyIntegrationTest.class); 
private static final String ORIGINAL_OBJ = "test_file.dat"; private static final String COPIED_OBJ = "test_file_copy.dat"; private static final String ORIGINAL_OBJ_SPECIAL_CHARACTER = "original-special-chars-@$%"; @@ -51,6 +56,8 @@ public class S3CrtClientCopyIntegrationTest extends S3IntegrationTestBase { private static final long OBJ_SIZE = ThreadLocalRandom.current().nextLong(8 * 1024 * 1024, 16 * 1024 * 1024 + 1); private static final long SMALL_OBJ_SIZE = 1024 * 1024; private static S3AsyncClient s3CrtAsyncClient; + private static S3AsyncClient s3MpuClient; + @BeforeAll public static void setUp() throws Exception { S3IntegrationTestBase.setUp(); @@ -59,40 +66,56 @@ public static void setUp() throws Exception { .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) .region(DEFAULT_REGION) .build(); + s3MpuClient = S3AsyncClient.builder() + .region(DEFAULT_REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(o -> o.addExecutionInterceptor( + new UserAgentVerifyingExecutionInterceptor("NettyNio", ClientType.ASYNC))) + .multipartEnabled(true) + .build(); } @AfterAll public static void teardown() throws Exception { s3CrtAsyncClient.close(); + s3MpuClient.close(); deleteBucketAndAllContents(BUCKET); } - @Test - void copy_singlePart_hasSameContent() { + public static Stream s3AsyncClient() { + return Stream.of(s3MpuClient, s3CrtAsyncClient); + } + + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("s3AsyncClient") + void copy_singlePart_hasSameContent(S3AsyncClient s3AsyncClient) { byte[] originalContent = randomBytes(SMALL_OBJ_SIZE); createOriginalObject(originalContent, ORIGINAL_OBJ); - copyObject(ORIGINAL_OBJ, COPIED_OBJ); + copyObject(ORIGINAL_OBJ, COPIED_OBJ, s3AsyncClient); validateCopiedObject(originalContent, ORIGINAL_OBJ); } - @Test - void copy_copiedObject_hasSameContent() { + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("s3AsyncClient") + void copy_copiedObject_hasSameContent(S3AsyncClient 
s3AsyncClient) { byte[] originalContent = randomBytes(OBJ_SIZE); createOriginalObject(originalContent, ORIGINAL_OBJ); - copyObject(ORIGINAL_OBJ, COPIED_OBJ); + copyObject(ORIGINAL_OBJ, COPIED_OBJ, s3AsyncClient); validateCopiedObject(originalContent, ORIGINAL_OBJ); } - @Test - void copy_specialCharacters_hasSameContent() { + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("s3AsyncClient") + void copy_specialCharacters_hasSameContent(S3AsyncClient s3AsyncClient) { byte[] originalContent = randomBytes(OBJ_SIZE); createOriginalObject(originalContent, ORIGINAL_OBJ_SPECIAL_CHARACTER); - copyObject(ORIGINAL_OBJ_SPECIAL_CHARACTER, COPIED_OBJ_SPECIAL_CHARACTER); + copyObject(ORIGINAL_OBJ_SPECIAL_CHARACTER, COPIED_OBJ_SPECIAL_CHARACTER, s3AsyncClient); validateCopiedObject(originalContent, COPIED_OBJ_SPECIAL_CHARACTER); } - @Test - void copy_ssecServerSideEncryption_shouldSucceed() { + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("s3AsyncClient") + void copy_ssecServerSideEncryption_shouldSucceed(S3AsyncClient s3AsyncClient) { byte[] originalContent = randomBytes(OBJ_SIZE); byte[] secretKey = generateSecretKey(); String b64Key = Base64.getEncoder().encodeToString(secretKey); @@ -102,16 +125,14 @@ void copy_ssecServerSideEncryption_shouldSucceed() { String newB64Key = Base64.getEncoder().encodeToString(newSecretKey); String newB64KeyMd5 = Md5Utils.md5AsBase64(newSecretKey); - // Java S3 client is used because CRT S3 client putObject fails with SSE-C - // TODO: change back to S3CrtClient once the issue is fixed in CRT - s3Async.putObject(r -> r.bucket(BUCKET) - .key(ORIGINAL_OBJ) - .sseCustomerKey(b64Key) - .sseCustomerAlgorithm(AES256.name()) - .sseCustomerKeyMD5(b64KeyMd5), - AsyncRequestBody.fromBytes(originalContent)).join(); + s3AsyncClient.putObject(r -> r.bucket(BUCKET) + .key(ORIGINAL_OBJ) + .sseCustomerKey(b64Key) + .sseCustomerAlgorithm(AES256.name()) + .sseCustomerKeyMD5(b64KeyMd5), + 
AsyncRequestBody.fromBytes(originalContent)).join(); - CompletableFuture future = s3CrtAsyncClient.copyObject(c -> c + CompletableFuture future = s3AsyncClient.copyObject(c -> c .sourceBucket(BUCKET) .sourceKey(ORIGINAL_OBJ) .metadataDirective(MetadataDirective.REPLACE) @@ -143,12 +164,12 @@ private static byte[] generateSecretKey() { private void createOriginalObject(byte[] originalContent, String originalKey) { s3CrtAsyncClient.putObject(r -> r.bucket(BUCKET) - .key(originalKey), + .key(originalKey), AsyncRequestBody.fromBytes(originalContent)).join(); } - private void copyObject(String original, String destination) { - CompletableFuture future = s3CrtAsyncClient.copyObject(c -> c + private void copyObject(String original, String destination, S3AsyncClient s3AsyncClient) { + CompletableFuture future = s3AsyncClient.copyObject(c -> c .sourceBucket(BUCKET) .sourceKey(original) .destinationBucket(BUCKET) diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3MultipartClientPutObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3MultipartClientPutObjectIntegrationTest.java new file mode 100644 index 00000000000..fa31b5453e5 --- /dev/null +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/multipart/S3MultipartClientPutObjectIntegrationTest.java @@ -0,0 +1,136 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.multipart; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.util.Optional; +import java.util.UUID; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.core.ClientType; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3IntegrationTestBase; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.utils.ChecksumUtils; + +@Timeout(value = 30, unit = SECONDS) +public class S3MultipartClientPutObjectIntegrationTest extends S3IntegrationTestBase { + + private static final String TEST_BUCKET = temporaryBucketName(S3MultipartClientPutObjectIntegrationTest.class); + private static final String TEST_KEY = "testfile.dat"; + private static final int OBJ_SIZE = 19 * 1024 * 1024; + + private static File testFile; + private static S3AsyncClient mpuS3Client; + + @BeforeAll + public static void setup() throws Exception { + S3IntegrationTestBase.setUp(); + S3IntegrationTestBase.createBucket(TEST_BUCKET); + byte[] CONTENT = + RandomStringUtils.randomAscii(OBJ_SIZE).getBytes(Charset.defaultCharset()); + + testFile = 
File.createTempFile("SplittingPublisherTest", UUID.randomUUID().toString()); + Files.write(testFile.toPath(), CONTENT); + mpuS3Client = S3AsyncClient + .builder() + .region(DEFAULT_REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .overrideConfiguration(o -> o.addExecutionInterceptor( + new UserAgentVerifyingExecutionInterceptor("NettyNio", ClientType.ASYNC))) + .multipartEnabled(true) + .build(); + } + + @AfterAll + public static void teardown() throws Exception { + mpuS3Client.close(); + testFile.delete(); + deleteBucketAndAllContents(TEST_BUCKET); + } + + @Test + void putObject_fileRequestBody_objectSentCorrectly() throws Exception { + AsyncRequestBody body = AsyncRequestBody.fromFile(testFile.toPath()); + mpuS3Client.putObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), body).join(); + + ResponseInputStream objContent = + S3IntegrationTestBase.s3.getObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), + ResponseTransformer.toInputStream()); + + assertThat(objContent.response().contentLength()).isEqualTo(testFile.length()); + byte[] expectedSum = ChecksumUtils.computeCheckSum(Files.newInputStream(testFile.toPath())); + assertThat(ChecksumUtils.computeCheckSum(objContent)).isEqualTo(expectedSum); + } + + @Test + void putObject_byteAsyncRequestBody_objectSentCorrectly() throws Exception { + byte[] bytes = RandomStringUtils.randomAscii(OBJ_SIZE).getBytes(Charset.defaultCharset()); + AsyncRequestBody body = AsyncRequestBody.fromBytes(bytes); + mpuS3Client.putObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), body).join(); + + ResponseInputStream objContent = + S3IntegrationTestBase.s3.getObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), + ResponseTransformer.toInputStream()); + + assertThat(objContent.response().contentLength()).isEqualTo(OBJ_SIZE); + byte[] expectedSum = ChecksumUtils.computeCheckSum(new ByteArrayInputStream(bytes)); + assertThat(ChecksumUtils.computeCheckSum(objContent)).isEqualTo(expectedSum); + } + + @Test + void 
putObject_unknownContentLength_objectSentCorrectly() throws Exception { + AsyncRequestBody body = FileAsyncRequestBody.builder() + .path(testFile.toPath()) + .build(); + mpuS3Client.putObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + body.subscribe(s); + } + }).get(30, SECONDS); + + ResponseInputStream objContent = + S3IntegrationTestBase.s3.getObject(r -> r.bucket(TEST_BUCKET).key(TEST_KEY), + ResponseTransformer.toInputStream()); + + assertThat(objContent.response().contentLength()).isEqualTo(testFile.length()); + byte[] expectedSum = ChecksumUtils.computeCheckSum(Files.newInputStream(testFile.toPath())); + assertThat(ChecksumUtils.computeCheckSum(objContent)).isEqualTo(expectedSum); + } + +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3CrtAsyncClientBuilder.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3CrtAsyncClientBuilder.java index 7d119c2f45b..78dc144f661 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3CrtAsyncClientBuilder.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/S3CrtAsyncClientBuilder.java @@ -234,6 +234,26 @@ default S3CrtAsyncClientBuilder retryConfiguration(Consumer + * Multipart uploads are easier to recover from and also potentially faster than single part uploads, especially when the + * upload parts can be uploaded in parallel. Because there are additional network API calls, small objects are still + * recommended to use a single connection for the upload. See + * Uploading and copying objects using + * multipart upload. + * + *

    + * By default, it is the same as {@link #minimumPartSizeInBytes(Long)}. + * + * @param thresholdInBytes the value of the threshold to set. + * @return an instance of this builder. + */ + S3CrtAsyncClientBuilder thresholdInBytes(Long thresholdInBytes); + @Override S3AsyncClient build(); } \ No newline at end of file diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/client/S3AsyncClientDecorator.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/client/S3AsyncClientDecorator.java index 2dbb61091da..b751cb29c1b 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/client/S3AsyncClientDecorator.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/client/S3AsyncClientDecorator.java @@ -23,11 +23,17 @@ import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.endpoints.S3ClientContextParams; import software.amazon.awssdk.services.s3.internal.crossregion.S3CrossRegionAsyncClient; +import software.amazon.awssdk.services.s3.internal.multipart.MultipartS3AsyncClient; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.ConditionalDecorator; @SdkInternalApi public class S3AsyncClientDecorator { + public static final AttributeMap.Key MULTIPART_CONFIGURATION_KEY = + new AttributeMap.Key(MultipartConfiguration.class){}; + public static final AttributeMap.Key MULTIPART_ENABLED_KEY = + new AttributeMap.Key(Boolean.class){}; public S3AsyncClientDecorator() { } @@ -36,14 +42,26 @@ public S3AsyncClient decorate(S3AsyncClient base, SdkClientConfiguration clientConfiguration, AttributeMap clientContextParams) { List> decorators = new ArrayList<>(); - decorators.add(ConditionalDecorator.create(isCrossRegionEnabledAsync(clientContextParams), - S3CrossRegionAsyncClient::new)); + decorators.add(ConditionalDecorator.create( 
+ isCrossRegionEnabledAsync(clientContextParams), + S3CrossRegionAsyncClient::new)); + + decorators.add(ConditionalDecorator.create( + isMultipartEnable(clientContextParams), + client -> { + MultipartConfiguration multipartConfiguration = clientContextParams.get(MULTIPART_CONFIGURATION_KEY); + return MultipartS3AsyncClient.create(client, multipartConfiguration); + })); return ConditionalDecorator.decorate(base, decorators); } private Predicate isCrossRegionEnabledAsync(AttributeMap clientContextParams) { Boolean crossRegionEnabled = clientContextParams.get(S3ClientContextParams.CROSS_REGION_ACCESS_ENABLED); - return client -> crossRegionEnabled != null && crossRegionEnabled.booleanValue(); + return client -> crossRegionEnabled != null && crossRegionEnabled.booleanValue(); } + private Predicate isMultipartEnable(AttributeMap clientContextParams) { + Boolean multipartEnabled = clientContextParams.get(MULTIPART_ENABLED_KEY); + return client -> multipartEnabled != null && multipartEnabled.booleanValue(); + } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyRequestConversionUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyRequestConversionUtils.java deleted file mode 100644 index 2a464b10f49..00000000000 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyRequestConversionUtils.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.s3.internal.crt; - -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; -import software.amazon.awssdk.services.s3.model.CompletedPart; -import software.amazon.awssdk.services.s3.model.CopyObjectRequest; -import software.amazon.awssdk.services.s3.model.CopyObjectResponse; -import software.amazon.awssdk.services.s3.model.CopyPartResult; -import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.HeadObjectRequest; -import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; - -/** - * Request conversion utility method for POJO classes associated with {@link S3CrtAsyncClient#copyObject(CopyObjectRequest)} - */ -@SdkInternalApi -public final class CopyRequestConversionUtils { - - private CopyRequestConversionUtils() { - } - - public static HeadObjectRequest toHeadObjectRequest(CopyObjectRequest copyObjectRequest) { - return HeadObjectRequest.builder() - .bucket(copyObjectRequest.sourceBucket()) - .key(copyObjectRequest.sourceKey()) - .versionId(copyObjectRequest.sourceVersionId()) - .ifMatch(copyObjectRequest.copySourceIfMatch()) - .ifModifiedSince(copyObjectRequest.copySourceIfModifiedSince()) - .ifNoneMatch(copyObjectRequest.copySourceIfNoneMatch()) - .ifUnmodifiedSince(copyObjectRequest.copySourceIfUnmodifiedSince()) - .expectedBucketOwner(copyObjectRequest.expectedSourceBucketOwner()) - .sseCustomerAlgorithm(copyObjectRequest.copySourceSSECustomerAlgorithm()) - .sseCustomerKey(copyObjectRequest.copySourceSSECustomerKey()) - .sseCustomerKeyMD5(copyObjectRequest.copySourceSSECustomerKeyMD5()) - .build(); - } - - public static CompletedPart toCompletedPart(CopyPartResult 
copyPartResult, int partNumber) { - return CompletedPart.builder() - .partNumber(partNumber) - .eTag(copyPartResult.eTag()) - .checksumCRC32C(copyPartResult.checksumCRC32C()) - .checksumCRC32(copyPartResult.checksumCRC32()) - .checksumSHA1(copyPartResult.checksumSHA1()) - .checksumSHA256(copyPartResult.checksumSHA256()) - .eTag(copyPartResult.eTag()) - .build(); - } - - public static CreateMultipartUploadRequest toCreateMultipartUploadRequest(CopyObjectRequest copyObjectRequest) { - return CreateMultipartUploadRequest.builder() - .bucket(copyObjectRequest.destinationBucket()) - .contentEncoding(copyObjectRequest.contentEncoding()) - .checksumAlgorithm(copyObjectRequest.checksumAlgorithmAsString()) - .tagging(copyObjectRequest.tagging()) - .contentType(copyObjectRequest.contentType()) - .contentLanguage(copyObjectRequest.contentLanguage()) - .contentDisposition(copyObjectRequest.contentDisposition()) - .cacheControl(copyObjectRequest.cacheControl()) - .expires(copyObjectRequest.expires()) - .key(copyObjectRequest.destinationKey()) - .websiteRedirectLocation(copyObjectRequest.websiteRedirectLocation()) - .expectedBucketOwner(copyObjectRequest.expectedBucketOwner()) - .requestPayer(copyObjectRequest.requestPayerAsString()) - .acl(copyObjectRequest.aclAsString()) - .grantRead(copyObjectRequest.grantRead()) - .grantReadACP(copyObjectRequest.grantReadACP()) - .grantWriteACP(copyObjectRequest.grantWriteACP()) - .grantFullControl(copyObjectRequest.grantFullControl()) - .storageClass(copyObjectRequest.storageClassAsString()) - .ssekmsKeyId(copyObjectRequest.ssekmsKeyId()) - .sseCustomerKey(copyObjectRequest.sseCustomerKey()) - .sseCustomerAlgorithm(copyObjectRequest.sseCustomerAlgorithm()) - .sseCustomerKeyMD5(copyObjectRequest.sseCustomerKeyMD5()) - .ssekmsEncryptionContext(copyObjectRequest.ssekmsEncryptionContext()) - .serverSideEncryption(copyObjectRequest.serverSideEncryptionAsString()) - .bucketKeyEnabled(copyObjectRequest.bucketKeyEnabled()) - 
.objectLockMode(copyObjectRequest.objectLockModeAsString()) - .objectLockLegalHoldStatus(copyObjectRequest.objectLockLegalHoldStatusAsString()) - .objectLockRetainUntilDate(copyObjectRequest.objectLockRetainUntilDate()) - .metadata(copyObjectRequest.metadata()) - .build(); - } - - public static CopyObjectResponse toCopyObjectResponse(CompleteMultipartUploadResponse response) { - CopyObjectResponse.Builder builder = CopyObjectResponse.builder() - .versionId(response.versionId()) - .copyObjectResult(b -> b.checksumCRC32(response.checksumCRC32()) - .checksumSHA1(response.checksumSHA1()) - .checksumSHA256(response.checksumSHA256()) - .checksumCRC32C(response.checksumCRC32C()) - .eTag(response.eTag()) - .build()) - .expiration(response.expiration()) - .bucketKeyEnabled(response.bucketKeyEnabled()) - .serverSideEncryption(response.serverSideEncryption()) - .ssekmsKeyId(response.ssekmsKeyId()) - .serverSideEncryption(response.serverSideEncryptionAsString()) - .requestCharged(response.requestChargedAsString()); - if (response.responseMetadata() != null) { - builder.responseMetadata(response.responseMetadata()); - } - - if (response.sdkHttpResponse() != null) { - builder.sdkHttpResponse(response.sdkHttpResponse()); - } - - return builder.build(); - } - - public static AbortMultipartUploadRequest toAbortMultipartUploadRequest(CopyObjectRequest copyObjectRequest, - String uploadId) { - return AbortMultipartUploadRequest.builder() - .uploadId(uploadId) - .bucket(copyObjectRequest.destinationBucket()) - .key(copyObjectRequest.destinationKey()) - .requestPayer(copyObjectRequest.requestPayerAsString()) - .expectedBucketOwner(copyObjectRequest.expectedBucketOwner()) - .build(); - } - - public static UploadPartCopyRequest toUploadPartCopyRequest(CopyObjectRequest copyObjectRequest, - int partNumber, - String uploadId, - String range) { - - return UploadPartCopyRequest.builder() - .sourceBucket(copyObjectRequest.sourceBucket()) - .sourceKey(copyObjectRequest.sourceKey()) - 
.sourceVersionId(copyObjectRequest.sourceVersionId()) - .uploadId(uploadId) - .partNumber(partNumber) - .destinationBucket(copyObjectRequest.destinationBucket()) - .destinationKey(copyObjectRequest.destinationKey()) - .copySourceIfMatch(copyObjectRequest.copySourceIfMatch()) - .copySourceIfNoneMatch(copyObjectRequest.copySourceIfNoneMatch()) - .copySourceIfUnmodifiedSince(copyObjectRequest.copySourceIfUnmodifiedSince()) - .copySourceRange(range) - .copySourceSSECustomerAlgorithm(copyObjectRequest.copySourceSSECustomerAlgorithm()) - .copySourceSSECustomerKeyMD5(copyObjectRequest.copySourceSSECustomerKeyMD5()) - .copySourceSSECustomerKey(copyObjectRequest.copySourceSSECustomerKey()) - .copySourceIfModifiedSince(copyObjectRequest.copySourceIfModifiedSince()) - .expectedBucketOwner(copyObjectRequest.expectedBucketOwner()) - .expectedSourceBucketOwner(copyObjectRequest.expectedSourceBucketOwner()) - .requestPayer(copyObjectRequest.requestPayerAsString()) - .sseCustomerKey(copyObjectRequest.sseCustomerKey()) - .sseCustomerAlgorithm(copyObjectRequest.sseCustomerAlgorithm()) - .sseCustomerKeyMD5(copyObjectRequest.sseCustomerKeyMD5()) - .build(); - } - -} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java index 5ab689637c4..284748f163b 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/DefaultS3CrtAsyncClient.java @@ -53,6 +53,7 @@ import software.amazon.awssdk.services.s3.S3CrtAsyncClientBuilder; import software.amazon.awssdk.services.s3.crt.S3CrtHttpConfiguration; import software.amazon.awssdk.services.s3.crt.S3CrtRetryConfiguration; +import software.amazon.awssdk.services.s3.internal.multipart.CopyObjectHelper; import 
software.amazon.awssdk.services.s3.model.CopyObjectRequest; import software.amazon.awssdk.services.s3.model.CopyObjectResponse; import software.amazon.awssdk.services.s3.model.GetObjectRequest; @@ -69,7 +70,10 @@ private DefaultS3CrtAsyncClient(DefaultS3CrtClientBuilder builder) { super(initializeS3AsyncClient(builder)); long partSizeInBytes = builder.minimalPartSizeInBytes == null ? DEFAULT_PART_SIZE_IN_BYTES : builder.minimalPartSizeInBytes; - this.copyObjectHelper = new CopyObjectHelper((S3AsyncClient) delegate(), partSizeInBytes); + long thresholdInBytes = builder.thresholdInBytes == null ? partSizeInBytes : builder.thresholdInBytes; + this.copyObjectHelper = new CopyObjectHelper((S3AsyncClient) delegate(), + partSizeInBytes, + thresholdInBytes); } @Override @@ -114,6 +118,7 @@ private static S3CrtAsyncHttpClient.Builder initializeS3CrtAsyncHttpClient(Defau Validate.isPositiveOrNull(builder.maxConcurrency, "maxConcurrency"); Validate.isPositiveOrNull(builder.targetThroughputInGbps, "targetThroughputInGbps"); Validate.isPositiveOrNull(builder.minimalPartSizeInBytes, "minimalPartSizeInBytes"); + Validate.isPositiveOrNull(builder.thresholdInBytes, "thresholdInBytes"); S3NativeClientConfiguration.Builder nativeClientBuilder = S3NativeClientConfiguration.builder() @@ -125,7 +130,8 @@ private static S3CrtAsyncHttpClient.Builder initializeS3CrtAsyncHttpClient(Defau .endpointOverride(builder.endpointOverride) .credentialsProvider(builder.credentialsProvider) .readBufferSizeInBytes(builder.readBufferSizeInBytes) - .httpConfiguration(builder.httpConfiguration); + .httpConfiguration(builder.httpConfiguration) + .thresholdInBytes(builder.thresholdInBytes); if (builder.retryConfiguration != null) { nativeClientBuilder.standardRetryOptions( @@ -153,6 +159,7 @@ public static final class DefaultS3CrtClientBuilder implements S3CrtAsyncClientB private List executionInterceptors; private S3CrtRetryConfiguration retryConfiguration; private boolean crossRegionAccessEnabled; + 
private Long thresholdInBytes; public AwsCredentialsProvider credentialsProvider() { return credentialsProvider; @@ -273,6 +280,12 @@ public S3CrtAsyncClientBuilder crossRegionAccessEnabled(Boolean crossRegionAcces return this; } + @Override + public S3CrtAsyncClientBuilder thresholdInBytes(Long thresholdInBytes) { + this.thresholdInBytes = thresholdInBytes; + return this; + } + @Override public S3CrtAsyncClient build() { return new DefaultS3CrtAsyncClient(this); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClient.java index 149471f3017..f8bf0d809ff 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClient.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClient.java @@ -75,6 +75,7 @@ private S3CrtAsyncHttpClient(Builder builder) { .withCredentialsProvider(s3NativeClientConfiguration.credentialsProvider()) .withClientBootstrap(s3NativeClientConfiguration.clientBootstrap()) .withPartSize(s3NativeClientConfiguration.partSizeBytes()) + .withMultipartUploadThreshold(s3NativeClientConfiguration.thresholdInBytes()) .withComputeContentMd5(false) .withMaxConnections(s3NativeClientConfiguration.maxConcurrency()) .withThroughputTargetGbps(s3NativeClientConfiguration.targetThroughputInGbps()) diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3NativeClientConfiguration.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3NativeClientConfiguration.java index b39cf1ea8e4..fe5bb9a5dba 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3NativeClientConfiguration.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/S3NativeClientConfiguration.java @@ -51,6 +51,7 @@ public class 
S3NativeClientConfiguration implements SdkAutoCloseable { private final CrtCredentialsProviderAdapter credentialProviderAdapter; private final CredentialsProvider credentialsProvider; private final long partSizeInBytes; + private final long thresholdInBytes; private final double targetThroughputInGbps; private final int maxConcurrency; private final URI endpointOverride; @@ -86,6 +87,8 @@ public S3NativeClientConfiguration(Builder builder) { this.partSizeInBytes = builder.partSizeInBytes == null ? DEFAULT_PART_SIZE_IN_BYTES : builder.partSizeInBytes; + this.thresholdInBytes = builder.thresholdInBytes == null ? this.partSizeInBytes : + builder.thresholdInBytes; this.targetThroughputInGbps = builder.targetThroughputInGbps == null ? DEFAULT_TARGET_THROUGHPUT_IN_GBPS : builder.targetThroughputInGbps; @@ -144,6 +147,10 @@ public long partSizeBytes() { return partSizeInBytes; } + public long thresholdInBytes() { + return thresholdInBytes; + } + public double targetThroughputInGbps() { return targetThroughputInGbps; } @@ -187,6 +194,7 @@ public static final class Builder { private S3CrtHttpConfiguration httpConfiguration; private StandardRetryOptions standardRetryOptions; + private Long thresholdInBytes; private Builder() { } @@ -247,5 +255,10 @@ public Builder standardRetryOptions(StandardRetryOptions standardRetryOptions) { this.standardRetryOptions = standardRetryOptions; return this; } + + public Builder thresholdInBytes(Long thresholdInBytes) { + this.thresholdInBytes = thresholdInBytes; + return this; + } } } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/UploadPartCopyRequestIterable.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/UploadPartCopyRequestIterable.java index 84d3c6ac530..da8eea8fc64 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/UploadPartCopyRequestIterable.java +++ 
b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/UploadPartCopyRequestIterable.java @@ -19,6 +19,7 @@ import java.util.NoSuchElementException; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.pagination.sync.SdkIterable; +import software.amazon.awssdk.services.s3.internal.multipart.SdkPojoConversionUtils; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; @@ -65,10 +66,10 @@ public UploadPartCopyRequest next() { long partSize = Math.min(optimalPartSize, remainingBytes); String range = range(partSize); UploadPartCopyRequest uploadPartCopyRequest = - CopyRequestConversionUtils.toUploadPartCopyRequest(copyObjectRequest, - partNumber, - uploadId, - range); + SdkPojoConversionUtils.toUploadPartCopyRequest(copyObjectRequest, + partNumber, + uploadId, + range); partNumber++; offset += partSize; remainingBytes -= partSize; diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptor.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/StreamingRequestInterceptor.java similarity index 85% rename from services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptor.java rename to services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/StreamingRequestInterceptor.java index 92859b96dea..82a47cee6f3 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptor.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/handlers/StreamingRequestInterceptor.java @@ -21,17 +21,18 @@ import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import 
software.amazon.awssdk.services.s3.model.UploadPartRequest; /** * Interceptor to add an 'Expect: 100-continue' header to the HTTP Request if it represents a PUT Object request. */ @SdkInternalApi //TODO: This should be generalized for all streaming requests -public final class PutObjectInterceptor implements ExecutionInterceptor { +public final class StreamingRequestInterceptor implements ExecutionInterceptor { @Override public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { - if (context.request() instanceof PutObjectRequest) { + if (context.request() instanceof PutObjectRequest || context.request() instanceof UploadPartRequest) { return context.httpRequest().toBuilder().putHeader("Expect", "100-continue").build(); } return context.httpRequest(); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/CopyObjectHelper.java similarity index 71% rename from services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelper.java rename to services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/CopyObjectHelper.java index a26c5ab62c5..afb1ca0e4e8 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelper.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/CopyObjectHelper.java @@ -13,21 +13,17 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.services.s3.internal.crt; +package software.amazon.awssdk.services.s3.internal.multipart; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; import java.util.concurrent.atomic.AtomicReferenceArray; -import java.util.function.BiFunction; -import java.util.function.Supplier; import java.util.stream.IntStream; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.services.s3.S3AsyncClient; -import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.internal.crt.UploadPartCopyRequestIterable; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; @@ -50,17 +46,18 @@ public final class CopyObjectHelper { private static final Logger log = Logger.loggerFor(S3AsyncClient.class); - /** - * The max number of parts on S3 side is 10,000 - */ - private static final long MAX_UPLOAD_PARTS = 10_000; - private final S3AsyncClient s3AsyncClient; private final long partSizeInBytes; + private final GenericMultipartHelper genericMultipartHelper; + private final long uploadThreshold; - public CopyObjectHelper(S3AsyncClient s3AsyncClient, long partSizeInBytes) { + public CopyObjectHelper(S3AsyncClient s3AsyncClient, long partSizeInBytes, long uploadThreshold) { this.s3AsyncClient = s3AsyncClient; this.partSizeInBytes = partSizeInBytes; + this.genericMultipartHelper = new GenericMultipartHelper<>(s3AsyncClient, + SdkPojoConversionUtils::toAbortMultipartUploadRequest, + SdkPojoConversionUtils::toCopyObjectResponse); + this.uploadThreshold = uploadThreshold; } public CompletableFuture copyObject(CopyObjectRequest 
copyObjectRequest) { @@ -69,14 +66,15 @@ public CompletableFuture copyObject(CopyObjectRequest copyOb try { CompletableFuture headFuture = - s3AsyncClient.headObject(CopyRequestConversionUtils.toHeadObjectRequest(copyObjectRequest)); + s3AsyncClient.headObject(SdkPojoConversionUtils.toHeadObjectRequest(copyObjectRequest)); // Ensure cancellations are forwarded to the head future CompletableFutureUtils.forwardExceptionTo(returnFuture, headFuture); headFuture.whenComplete((headObjectResponse, throwable) -> { if (throwable != null) { - handleException(returnFuture, () -> "Failed to retrieve metadata from the source object", throwable); + genericMultipartHelper.handleException(returnFuture, () -> "Failed to retrieve metadata from the source " + + "object", throwable); } else { doCopyObject(copyObjectRequest, returnFuture, headObjectResponse); } @@ -92,7 +90,7 @@ private void doCopyObject(CopyObjectRequest copyObjectRequest, CompletableFuture HeadObjectResponse headObjectResponse) { Long contentLength = headObjectResponse.contentLength(); - if (contentLength <= partSizeInBytes) { + if (contentLength <= partSizeInBytes || contentLength <= uploadThreshold) { log.debug(() -> "Starting the copy as a single copy part request"); copyInOneChunk(copyObjectRequest, returnFuture); } else { @@ -105,7 +103,7 @@ private void copyInParts(CopyObjectRequest copyObjectRequest, Long contentLength, CompletableFuture returnFuture) { - CreateMultipartUploadRequest request = CopyRequestConversionUtils.toCreateMultipartUploadRequest(copyObjectRequest); + CreateMultipartUploadRequest request = SdkPojoConversionUtils.toCreateMultipartUploadRequest(copyObjectRequest); CompletableFuture createMultipartUploadFuture = s3AsyncClient.createMultipartUpload(request); @@ -114,7 +112,7 @@ private void copyInParts(CopyObjectRequest copyObjectRequest, createMultipartUploadFuture.whenComplete((createMultipartUploadResponse, throwable) -> { if (throwable != null) { - handleException(returnFuture, () -> 
"Failed to initiate multipart upload", throwable); + genericMultipartHelper.handleException(returnFuture, () -> "Failed to initiate multipart upload", throwable); } else { log.debug(() -> "Initiated new multipart upload, uploadId: " + createMultipartUploadResponse.uploadId()); doCopyInParts(copyObjectRequest, contentLength, returnFuture, createMultipartUploadResponse.uploadId()); @@ -122,17 +120,18 @@ private void copyInParts(CopyObjectRequest copyObjectRequest, }); } - private int determinePartCount(long contentLength, long partSize) { - return (int) Math.ceil(contentLength / (double) partSize); - } - private void doCopyInParts(CopyObjectRequest copyObjectRequest, Long contentLength, CompletableFuture returnFuture, String uploadId) { - long optimalPartSize = calculateOptimalPartSizeForCopy(contentLength); - int partCount = determinePartCount(contentLength, optimalPartSize); + long optimalPartSize = genericMultipartHelper.calculateOptimalPartSizeFor(contentLength, partSizeInBytes); + + int partCount = genericMultipartHelper.determinePartCount(contentLength, optimalPartSize); + if (optimalPartSize > partSizeInBytes) { + log.debug(() -> String.format("Configured partSize is %d, but using %d to prevent reaching maximum number of parts " + + "allowed", partSizeInBytes, optimalPartSize)); + } log.debug(() -> String.format("Starting multipart copy with partCount: %s, optimalPartSize: %s", partCount, optimalPartSize)); @@ -147,32 +146,15 @@ private void doCopyInParts(CopyObjectRequest copyObjectRequest, optimalPartSize); CompletableFutureUtils.allOfExceptionForwarded(futures.toArray(new CompletableFuture[0])) .thenCompose(ignore -> completeMultipartUpload(copyObjectRequest, uploadId, completedParts)) - .handle(handleExceptionOrResponse(copyObjectRequest, returnFuture, uploadId)) + .handle(genericMultipartHelper.handleExceptionOrResponse(copyObjectRequest, returnFuture, + uploadId)) .exceptionally(throwable -> { - handleException(returnFuture, () -> "Unexpected exception 
occurred", throwable); + genericMultipartHelper.handleException(returnFuture, () -> "Unexpected exception occurred", + throwable); return null; }); } - private BiFunction handleExceptionOrResponse( - CopyObjectRequest copyObjectRequest, - CompletableFuture returnFuture, - String uploadId) { - - return (completeMultipartUploadResponse, throwable) -> { - if (throwable != null) { - cleanUpParts(copyObjectRequest, uploadId); - handleException(returnFuture, () -> "Failed to send multipart copy requests.", - throwable); - } else { - returnFuture.complete(CopyRequestConversionUtils.toCopyObjectResponse( - completeMultipartUploadResponse)); - } - - return null; - }; - } - private CompletableFuture completeMultipartUpload( CopyObjectRequest copyObjectRequest, String uploadId, AtomicReferenceArray completedParts) { log.debug(() -> String.format("Sending completeMultipartUploadRequest, uploadId: %s", @@ -193,39 +175,9 @@ private CompletableFuture completeMultipartUplo .sseCustomerKey(copyObjectRequest.sseCustomerKey()) .sseCustomerKeyMD5(copyObjectRequest.sseCustomerKeyMD5()) .build(); - return s3AsyncClient.completeMultipartUpload(completeMultipartUploadRequest); } - private void cleanUpParts(CopyObjectRequest copyObjectRequest, String uploadId) { - AbortMultipartUploadRequest abortMultipartUploadRequest = - CopyRequestConversionUtils.toAbortMultipartUploadRequest(copyObjectRequest, uploadId); - s3AsyncClient.abortMultipartUpload(abortMultipartUploadRequest) - .exceptionally(throwable -> { - log.warn(() -> String.format("Failed to abort previous multipart upload " - + "(id: %s)" - + ". You may need to call " - + "S3AsyncClient#abortMultiPartUpload to " - + "free all storage consumed by" - + " all parts. ", - uploadId), throwable); - return null; - }); - } - - private static void handleException(CompletableFuture returnFuture, - Supplier message, - Throwable throwable) { - Throwable cause = throwable instanceof CompletionException ? 
throwable.getCause() : throwable; - - if (cause instanceof Error) { - returnFuture.completeExceptionally(cause); - } else { - SdkClientException exception = SdkClientException.create(message.get(), cause); - returnFuture.completeExceptionally(exception); - } - } - private List> sendUploadPartCopyRequests(CopyObjectRequest copyObjectRequest, long contentLength, String uploadId, @@ -253,7 +205,8 @@ private void sendIndividualUploadPartCopy(String uploadId, log.debug(() -> "Sending uploadPartCopyRequest with range: " + uploadPartCopyRequest.copySourceRange() + " uploadId: " + uploadId); - CompletableFuture uploadPartCopyFuture = s3AsyncClient.uploadPartCopy(uploadPartCopyRequest); + CompletableFuture uploadPartCopyFuture = + s3AsyncClient.uploadPartCopy(uploadPartCopyRequest); CompletableFuture convertFuture = uploadPartCopyFuture.thenApply(uploadPartCopyResponse -> @@ -268,23 +221,13 @@ private static CompletedPart convertUploadPartCopyResponse(AtomicReferenceArray< UploadPartCopyResponse uploadPartCopyResponse) { CopyPartResult copyPartResult = uploadPartCopyResponse.copyPartResult(); CompletedPart completedPart = - CopyRequestConversionUtils.toCompletedPart(copyPartResult, - partNumber); + SdkPojoConversionUtils.toCompletedPart(copyPartResult, + partNumber); completedParts.set(partNumber - 1, completedPart); return completedPart; } - /** - * Calculates the optimal part size of each part request if the copy operation is carried out as multipart copy. 
- */ - private long calculateOptimalPartSizeForCopy(long contentLengthOfSource) { - double optimalPartSize = contentLengthOfSource / (double) MAX_UPLOAD_PARTS; - - optimalPartSize = Math.ceil(optimalPartSize); - return (long) Math.max(optimalPartSize, partSizeInBytes); - } - private void copyInOneChunk(CopyObjectRequest copyObjectRequest, CompletableFuture returnFuture) { CompletableFuture copyObjectFuture = diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/GenericMultipartHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/GenericMultipartHelper.java new file mode 100644 index 00000000000..38e76394958 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/GenericMultipartHelper.java @@ -0,0 +1,140 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.IntStream; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.S3Request; +import software.amazon.awssdk.services.s3.model.S3Response; +import software.amazon.awssdk.utils.Logger; + +@SdkInternalApi +public final class GenericMultipartHelper { + private static final Logger log = Logger.loggerFor(GenericMultipartHelper.class); + /** + * The max number of parts on S3 side is 10,000 + */ + private static final long MAX_UPLOAD_PARTS = 10_000; + + private final S3AsyncClient s3AsyncClient; + private final Function abortMultipartUploadRequestConverter; + private final Function responseConverter; + + public GenericMultipartHelper(S3AsyncClient s3AsyncClient, + Function abortMultipartUploadRequestConverter, + Function responseConverter) { + this.s3AsyncClient = s3AsyncClient; + this.abortMultipartUploadRequestConverter = abortMultipartUploadRequestConverter; + this.responseConverter = responseConverter; + } + + public void handleException(CompletableFuture returnFuture, + Supplier message, + Throwable throwable) { + Throwable cause = throwable instanceof 
CompletionException ? throwable.getCause() : throwable; + + if (cause instanceof Error) { + returnFuture.completeExceptionally(cause); + } else { + SdkClientException exception = SdkClientException.create(message.get(), cause); + returnFuture.completeExceptionally(exception); + } + } + + public long calculateOptimalPartSizeFor(long contentLengthOfSource, long partSizeInBytes) { + double optimalPartSize = contentLengthOfSource / (double) MAX_UPLOAD_PARTS; + + optimalPartSize = Math.ceil(optimalPartSize); + return (long) Math.max(optimalPartSize, partSizeInBytes); + } + + public int determinePartCount(long contentLength, long partSize) { + return (int) Math.ceil(contentLength / (double) partSize); + } + + public CompletableFuture completeMultipartUpload( + RequestT request, String uploadId, CompletedPart[] parts) { + log.debug(() -> String.format("Sending completeMultipartUploadRequest, uploadId: %s", + uploadId)); + CompleteMultipartUploadRequest completeMultipartUploadRequest = + CompleteMultipartUploadRequest.builder() + .bucket(request.getValueForField("Bucket", String.class).get()) + .key(request.getValueForField("Key", String.class).get()) + .uploadId(uploadId) + .multipartUpload(CompletedMultipartUpload.builder() + .parts(parts) + .build()) + .build(); + return s3AsyncClient.completeMultipartUpload(completeMultipartUploadRequest); + } + + public CompletableFuture completeMultipartUpload( + RequestT request, String uploadId, AtomicReferenceArray completedParts) { + CompletedPart[] parts = + IntStream.range(0, completedParts.length()) + .mapToObj(completedParts::get) + .toArray(CompletedPart[]::new); + return completeMultipartUpload(request, uploadId, parts); + } + + public BiFunction handleExceptionOrResponse( + RequestT request, + CompletableFuture returnFuture, + String uploadId) { + + return (completeMultipartUploadResponse, throwable) -> { + if (throwable != null) { + cleanUpParts(uploadId, abortMultipartUploadRequestConverter.apply(request)); + 
handleException(returnFuture, () -> "Failed to send multipart requests", + throwable); + } else { + returnFuture.complete(responseConverter.apply( + completeMultipartUploadResponse)); + } + + return null; + }; + } + + public void cleanUpParts(String uploadId, AbortMultipartUploadRequest.Builder abortMultipartUploadRequest) { + log.debug(() -> "Aborting multipart upload: " + uploadId); + AbortMultipartUploadRequest request = abortMultipartUploadRequest.uploadId(uploadId).build(); + s3AsyncClient.abortMultipartUpload(request) + .exceptionally(throwable -> { + log.warn(() -> String.format("Failed to abort previous multipart upload " + + "(id: %s)" + + ". You may need to call " + + "S3AsyncClient#abortMultiPartUpload to " + + "free all storage consumed by" + + " all parts. ", + uploadId), throwable); + return null; + }); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java new file mode 100644 index 00000000000..65b26ddec97 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + + +import java.util.concurrent.CompletableFuture; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.ApiName; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.services.s3.DelegatingS3AsyncClient; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.internal.UserAgentUtils; +import software.amazon.awssdk.services.s3.model.CopyObjectRequest; +import software.amazon.awssdk.services.s3.model.CopyObjectResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.S3Request; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; +import software.amazon.awssdk.utils.Validate; + +/** + * An {@link S3AsyncClient} that automatically converts put, copy requests to their respective multipart call. Note: get is not + * yet supported. 
+ * + * @see MultipartConfiguration + */ +@SdkInternalApi +public final class MultipartS3AsyncClient extends DelegatingS3AsyncClient { + + private static final ApiName USER_AGENT_API_NAME = ApiName.builder().name("hll").version("s3Multipart").build(); + + private static final long DEFAULT_MIN_PART_SIZE = 8L * 1024 * 1024; + private static final long DEFAULT_THRESHOLD = 8L * 1024 * 1024; + private static final long DEFAULT_API_CALL_BUFFER_SIZE = DEFAULT_MIN_PART_SIZE * 4; + + private final UploadObjectHelper mpuHelper; + private final CopyObjectHelper copyObjectHelper; + + private MultipartS3AsyncClient(S3AsyncClient delegate, MultipartConfiguration multipartConfiguration) { + super(delegate); + MultipartConfiguration validConfiguration = Validate.getOrDefault(multipartConfiguration, + MultipartConfiguration.builder()::build); + long minPartSizeInBytes = Validate.getOrDefault(validConfiguration.minimumPartSizeInBytes(), + () -> DEFAULT_MIN_PART_SIZE); + long threshold = Validate.getOrDefault(validConfiguration.thresholdInBytes(), + () -> DEFAULT_THRESHOLD); + long apiCallBufferSizeInBytes = Validate.getOrDefault(validConfiguration.apiCallBufferSizeInBytes(), + () -> computeApiCallBufferSize(validConfiguration)); + mpuHelper = new UploadObjectHelper(delegate, minPartSizeInBytes, threshold, apiCallBufferSizeInBytes); + copyObjectHelper = new CopyObjectHelper(delegate, minPartSizeInBytes, threshold); + } + + private long computeApiCallBufferSize(MultipartConfiguration multipartConfiguration) { + return multipartConfiguration.minimumPartSizeInBytes() != null ? 
multipartConfiguration.minimumPartSizeInBytes() * 4 + : DEFAULT_API_CALL_BUFFER_SIZE; + } + + @Override + public CompletableFuture putObject(PutObjectRequest putObjectRequest, AsyncRequestBody requestBody) { + return mpuHelper.uploadObject(putObjectRequest, requestBody); + } + + @Override + public CompletableFuture copyObject(CopyObjectRequest copyObjectRequest) { + return copyObjectHelper.copyObject(copyObjectRequest); + } + + @Override + public CompletableFuture getObject( + GetObjectRequest getObjectRequest, AsyncResponseTransformer asyncResponseTransformer) { + throw new UnsupportedOperationException( + "Multipart download is not yet supported. Instead use the CRT based S3 client for multipart download."); + } + + @Override + public void close() { + delegate().close(); + } + + public static MultipartS3AsyncClient create(S3AsyncClient client, MultipartConfiguration multipartConfiguration) { + S3AsyncClient clientWithUserAgent = new DelegatingS3AsyncClient(client) { + @Override + protected CompletableFuture invokeOperation(T request, Function> operation) { + T requestWithUserAgent = UserAgentUtils.applyUserAgentInfo(request, c -> c.addApiName(USER_AGENT_API_NAME)); + return operation.apply(requestWithUserAgent); + } + }; + return new MultipartS3AsyncClient(clientWithUserAgent, multipartConfiguration); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartUploadHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartUploadHelper.java new file mode 100644 index 00000000000..9754d284f5b --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartUploadHelper.java @@ -0,0 +1,147 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + + +import static software.amazon.awssdk.services.s3.internal.multipart.SdkPojoConversionUtils.toAbortMultipartUploadRequest; + +import java.util.Collection; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import software.amazon.awssdk.utils.CompletableFutureUtils; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Pair; + +/** + * A base class contains common logic used by {@link UploadWithUnknownContentLengthHelper} and + * {@link UploadWithKnownContentLengthHelper}. 
+ */ +@SdkInternalApi +public final class MultipartUploadHelper { + private static final Logger log = Logger.loggerFor(MultipartUploadHelper.class); + + private final S3AsyncClient s3AsyncClient; + private final long partSizeInBytes; + private final GenericMultipartHelper genericMultipartHelper; + + private final long maxMemoryUsageInBytes; + private final long multipartUploadThresholdInBytes; + + public MultipartUploadHelper(S3AsyncClient s3AsyncClient, + long partSizeInBytes, + long multipartUploadThresholdInBytes, + long maxMemoryUsageInBytes) { + this.s3AsyncClient = s3AsyncClient; + this.partSizeInBytes = partSizeInBytes; + this.genericMultipartHelper = new GenericMultipartHelper<>(s3AsyncClient, + SdkPojoConversionUtils::toAbortMultipartUploadRequest, + SdkPojoConversionUtils::toPutObjectResponse); + this.maxMemoryUsageInBytes = maxMemoryUsageInBytes; + this.multipartUploadThresholdInBytes = multipartUploadThresholdInBytes; + } + + CompletableFuture createMultipartUpload(PutObjectRequest putObjectRequest, + CompletableFuture returnFuture) { + CreateMultipartUploadRequest request = SdkPojoConversionUtils.toCreateMultipartUploadRequest(putObjectRequest); + CompletableFuture createMultipartUploadFuture = + s3AsyncClient.createMultipartUpload(request); + + // Ensure cancellations are forwarded to the createMultipartUploadFuture future + CompletableFutureUtils.forwardExceptionTo(returnFuture, createMultipartUploadFuture); + return createMultipartUploadFuture; + } + + void completeMultipartUpload(CompletableFuture returnFuture, + String uploadId, + CompletedPart[] completedParts, + PutObjectRequest putObjectRequest) { + genericMultipartHelper.completeMultipartUpload(putObjectRequest, + uploadId, + completedParts) + .handle(genericMultipartHelper.handleExceptionOrResponse(putObjectRequest, returnFuture, + uploadId)) + .exceptionally(throwable -> { + genericMultipartHelper.handleException(returnFuture, () -> "Unexpected exception occurred", + throwable); + return 
null; + }); + } + + CompletableFuture sendIndividualUploadPartRequest(String uploadId, + Consumer completedPartsConsumer, + Collection> futures, + Pair requestPair) { + UploadPartRequest uploadPartRequest = requestPair.left(); + Integer partNumber = uploadPartRequest.partNumber(); + log.debug(() -> "Sending uploadPartRequest: " + uploadPartRequest.partNumber() + " uploadId: " + uploadId + " " + + "contentLength " + requestPair.right().contentLength()); + + CompletableFuture uploadPartFuture = s3AsyncClient.uploadPart(uploadPartRequest, + requestPair.right()); + + CompletableFuture convertFuture = + uploadPartFuture.thenApply(uploadPartResponse -> convertUploadPartResponse(completedPartsConsumer, partNumber, + uploadPartResponse)); + futures.add(convertFuture); + CompletableFutureUtils.forwardExceptionTo(convertFuture, uploadPartFuture); + return convertFuture; + } + + void failRequestsElegantly(Collection> futures, + Throwable t, + String uploadId, + CompletableFuture returnFuture, + PutObjectRequest putObjectRequest) { + genericMultipartHelper.handleException(returnFuture, () -> "Failed to send multipart upload requests", t); + if (uploadId != null) { + genericMultipartHelper.cleanUpParts(uploadId, toAbortMultipartUploadRequest(putObjectRequest)); + } + cancelingOtherOngoingRequests(futures, t); + } + + static void cancelingOtherOngoingRequests(Collection> futures, Throwable t) { + log.trace(() -> "cancelling other ongoing requests " + futures.size()); + futures.forEach(f -> f.completeExceptionally(t)); + } + + static CompletedPart convertUploadPartResponse(Consumer consumer, + Integer partNumber, + UploadPartResponse uploadPartResponse) { + CompletedPart completedPart = SdkPojoConversionUtils.toCompletedPart(uploadPartResponse, partNumber); + + consumer.accept(completedPart); + return completedPart; + } + + void uploadInOneChunk(PutObjectRequest putObjectRequest, + AsyncRequestBody asyncRequestBody, + CompletableFuture returnFuture) { + CompletableFuture 
putObjectResponseCompletableFuture = s3AsyncClient.putObject(putObjectRequest, + asyncRequestBody); + CompletableFutureUtils.forwardExceptionTo(returnFuture, putObjectResponseCompletableFuture); + CompletableFutureUtils.forwardResultTo(putObjectResponseCompletableFuture, returnFuture); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtils.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtils.java new file mode 100644 index 00000000000..25fde18cada --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtils.java @@ -0,0 +1,195 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkField; +import software.amazon.awssdk.core.SdkPojo; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.CopyObjectRequest; +import software.amazon.awssdk.services.s3.model.CopyObjectResponse; +import software.amazon.awssdk.services.s3.model.CopyObjectResult; +import software.amazon.awssdk.services.s3.model.CopyPartResult; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import software.amazon.awssdk.utils.Logger; + +/** + * Request conversion utility method for POJO classes associated with multipart feature. 
+ */ +@SdkInternalApi +public final class SdkPojoConversionUtils { + private static final Logger log = Logger.loggerFor(SdkPojoConversionUtils.class); + + private static final HashSet PUT_OBJECT_REQUEST_TO_UPLOAD_PART_FIELDS_TO_IGNORE = + new HashSet<>(Arrays.asList("ChecksumSHA1", "ChecksumSHA256", "ContentMD5", "ChecksumCRC32C", "ChecksumCRC32")); + + private SdkPojoConversionUtils() { + } + + public static UploadPartRequest toUploadPartRequest(PutObjectRequest putObjectRequest, int partNumber, String uploadId) { + + UploadPartRequest.Builder builder = UploadPartRequest.builder(); + + setSdkFields(builder, putObjectRequest, PUT_OBJECT_REQUEST_TO_UPLOAD_PART_FIELDS_TO_IGNORE); + + return builder.uploadId(uploadId).partNumber(partNumber).build(); + } + + public static CreateMultipartUploadRequest toCreateMultipartUploadRequest(PutObjectRequest putObjectRequest) { + + CreateMultipartUploadRequest.Builder builder = CreateMultipartUploadRequest.builder(); + setSdkFields(builder, putObjectRequest); + return builder.build(); + } + + public static HeadObjectRequest toHeadObjectRequest(CopyObjectRequest copyObjectRequest) { + + // We can't set SdkFields directly because the fields in CopyObjectRequest do not match 100% with the ones in + // HeadObjectRequest + return HeadObjectRequest.builder() + .bucket(copyObjectRequest.sourceBucket()) + .key(copyObjectRequest.sourceKey()) + .versionId(copyObjectRequest.sourceVersionId()) + .ifMatch(copyObjectRequest.copySourceIfMatch()) + .ifModifiedSince(copyObjectRequest.copySourceIfModifiedSince()) + .ifNoneMatch(copyObjectRequest.copySourceIfNoneMatch()) + .ifUnmodifiedSince(copyObjectRequest.copySourceIfUnmodifiedSince()) + .expectedBucketOwner(copyObjectRequest.expectedSourceBucketOwner()) + .sseCustomerAlgorithm(copyObjectRequest.copySourceSSECustomerAlgorithm()) + .sseCustomerKey(copyObjectRequest.copySourceSSECustomerKey()) + .sseCustomerKeyMD5(copyObjectRequest.copySourceSSECustomerKeyMD5()) + .build(); + } + + public static 
CompletedPart toCompletedPart(CopyPartResult copyPartResult, int partNumber) { + CompletedPart.Builder builder = CompletedPart.builder(); + + setSdkFields(builder, copyPartResult); + return builder.partNumber(partNumber).build(); + } + + public static CompletedPart toCompletedPart(UploadPartResponse partResponse, int partNumber) { + CompletedPart.Builder builder = CompletedPart.builder(); + setSdkFields(builder, partResponse); + return builder.partNumber(partNumber).build(); + } + + private static void setSdkFields(SdkPojo targetBuilder, SdkPojo sourceObject) { + setSdkFields(targetBuilder, sourceObject, new HashSet<>()); + } + + private static void setSdkFields(SdkPojo targetBuilder, SdkPojo sourceObject, Set fieldsToIgnore) { + Map sourceFields = retrieveSdkFields(sourceObject, sourceObject.sdkFields()); + List> targetSdkFields = targetBuilder.sdkFields(); + + for (SdkField field : targetSdkFields) { + if (fieldsToIgnore.contains(field.memberName())) { + continue; + } + field.set(targetBuilder, sourceFields.getOrDefault(field.memberName(), null)); + } + } + + public static CreateMultipartUploadRequest toCreateMultipartUploadRequest(CopyObjectRequest copyObjectRequest) { + CreateMultipartUploadRequest.Builder builder = CreateMultipartUploadRequest.builder(); + + setSdkFields(builder, copyObjectRequest); + builder.bucket(copyObjectRequest.destinationBucket()); + builder.key(copyObjectRequest.destinationKey()); + return builder.build(); + } + + public static CopyObjectResponse toCopyObjectResponse(CompleteMultipartUploadResponse response) { + CopyObjectResponse.Builder builder = CopyObjectResponse.builder(); + + setSdkFields(builder, response); + + builder.responseMetadata(response.responseMetadata()); + builder.sdkHttpResponse(response.sdkHttpResponse()); + + return builder.copyObjectResult(toCopyObjectResult(response)) + .build(); + } + + private static CopyObjectResult toCopyObjectResult(CompleteMultipartUploadResponse response) { + CopyObjectResult.Builder 
builder = CopyObjectResult.builder(); + + setSdkFields(builder, response); + return builder.build(); + } + + public static AbortMultipartUploadRequest.Builder toAbortMultipartUploadRequest(CopyObjectRequest copyObjectRequest) { + AbortMultipartUploadRequest.Builder builder = AbortMultipartUploadRequest.builder(); + setSdkFields(builder, copyObjectRequest); + builder.bucket(copyObjectRequest.destinationBucket()); + builder.key(copyObjectRequest.destinationKey()); + return builder; + } + + public static AbortMultipartUploadRequest.Builder toAbortMultipartUploadRequest(PutObjectRequest putObjectRequest) { + AbortMultipartUploadRequest.Builder builder = AbortMultipartUploadRequest.builder(); + setSdkFields(builder, putObjectRequest); + return builder; + } + + public static UploadPartCopyRequest toUploadPartCopyRequest(CopyObjectRequest copyObjectRequest, + int partNumber, + String uploadId, + String range) { + UploadPartCopyRequest.Builder builder = UploadPartCopyRequest.builder(); + setSdkFields(builder, copyObjectRequest); + return builder.copySourceRange(range) + .partNumber(partNumber) + .uploadId(uploadId) + .bucket(copyObjectRequest.destinationBucket()) + .key(copyObjectRequest.destinationKey()) + .build(); + } + + public static PutObjectResponse toPutObjectResponse(CompleteMultipartUploadResponse response) { + + PutObjectResponse.Builder builder = PutObjectResponse.builder(); + + setSdkFields(builder, response); + + builder.responseMetadata(response.responseMetadata()); + builder.sdkHttpResponse(response.sdkHttpResponse()); + + return builder.build(); + } + + private static Map retrieveSdkFields(SdkPojo sourceObject, List> sdkFields) { + return sdkFields.stream().collect( + HashMap::new, + (map, field) -> map.put(field.memberName(), + field.getValueOrDefault(sourceObject)), + Map::putAll); + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java 
b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java new file mode 100644 index 00000000000..0700e8ade5f --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.utils.Logger; + +/** + * An internal helper class that automatically uses multipart upload based on the size of the object. 
+ */ +@SdkInternalApi +public final class UploadObjectHelper { + private static final Logger log = Logger.loggerFor(UploadObjectHelper.class); + + private final S3AsyncClient s3AsyncClient; + private final long partSizeInBytes; + private final GenericMultipartHelper genericMultipartHelper; + + private final long maxMemoryUsageInBytes; + private final long multipartUploadThresholdInBytes; + private final UploadWithKnownContentLengthHelper uploadWithKnownContentLength; + private final UploadWithUnknownContentLengthHelper uploadWithUnknownContentLength; + + public UploadObjectHelper(S3AsyncClient s3AsyncClient, + long partSizeInBytes, + long multipartUploadThresholdInBytes, + long maxMemoryUsageInBytes) { + this.s3AsyncClient = s3AsyncClient; + this.partSizeInBytes = partSizeInBytes; + this.genericMultipartHelper = new GenericMultipartHelper<>(s3AsyncClient, + SdkPojoConversionUtils::toAbortMultipartUploadRequest, + SdkPojoConversionUtils::toPutObjectResponse); + this.maxMemoryUsageInBytes = maxMemoryUsageInBytes; + this.multipartUploadThresholdInBytes = multipartUploadThresholdInBytes; + this.uploadWithKnownContentLength = new UploadWithKnownContentLengthHelper(s3AsyncClient, + partSizeInBytes, + multipartUploadThresholdInBytes, + maxMemoryUsageInBytes); + this.uploadWithUnknownContentLength = new UploadWithUnknownContentLengthHelper(s3AsyncClient, + partSizeInBytes, + multipartUploadThresholdInBytes, + maxMemoryUsageInBytes); + } + + public CompletableFuture uploadObject(PutObjectRequest putObjectRequest, + AsyncRequestBody asyncRequestBody) { + Long contentLength = asyncRequestBody.contentLength().orElseGet(putObjectRequest::contentLength); + + if (contentLength == null) { + return uploadWithUnknownContentLength.uploadObject(putObjectRequest, asyncRequestBody); + } else { + return uploadWithKnownContentLength.uploadObject(putObjectRequest, asyncRequestBody, contentLength.longValue()); + } + } +} diff --git 
a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java new file mode 100644 index 00000000000..f7d199ac3aa --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java @@ -0,0 +1,256 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + + +import java.util.Collection; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.function.Consumer; +import java.util.stream.IntStream; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Pair; + +/** + * An internal helper class that automatically uses multipart upload based on the size of the object. 
+ */ +@SdkInternalApi +public final class UploadWithKnownContentLengthHelper { + private static final Logger log = Logger.loggerFor(UploadWithKnownContentLengthHelper.class); + + private final S3AsyncClient s3AsyncClient; + private final long partSizeInBytes; + private final GenericMultipartHelper genericMultipartHelper; + + private final long maxMemoryUsageInBytes; + private final long multipartUploadThresholdInBytes; + private final MultipartUploadHelper multipartUploadHelper; + + public UploadWithKnownContentLengthHelper(S3AsyncClient s3AsyncClient, + long partSizeInBytes, + long multipartUploadThresholdInBytes, + long maxMemoryUsageInBytes) { + this.s3AsyncClient = s3AsyncClient; + this.partSizeInBytes = partSizeInBytes; + this.genericMultipartHelper = new GenericMultipartHelper<>(s3AsyncClient, + SdkPojoConversionUtils::toAbortMultipartUploadRequest, + SdkPojoConversionUtils::toPutObjectResponse); + this.maxMemoryUsageInBytes = maxMemoryUsageInBytes; + this.multipartUploadThresholdInBytes = multipartUploadThresholdInBytes; + this.multipartUploadHelper = new MultipartUploadHelper(s3AsyncClient, partSizeInBytes, multipartUploadThresholdInBytes, + maxMemoryUsageInBytes); + } + + public CompletableFuture uploadObject(PutObjectRequest putObjectRequest, + AsyncRequestBody asyncRequestBody, + long contentLength) { + CompletableFuture returnFuture = new CompletableFuture<>(); + + try { + if (contentLength > multipartUploadThresholdInBytes && contentLength > partSizeInBytes) { + log.debug(() -> "Starting the upload as multipart upload request"); + uploadInParts(putObjectRequest, contentLength, asyncRequestBody, returnFuture); + } else { + log.debug(() -> "Starting the upload as a single upload part request"); + multipartUploadHelper.uploadInOneChunk(putObjectRequest, asyncRequestBody, returnFuture); + } + + } catch (Throwable throwable) { + returnFuture.completeExceptionally(throwable); + } + + return returnFuture; + } + + private void uploadInParts(PutObjectRequest 
putObjectRequest, long contentLength, AsyncRequestBody asyncRequestBody, + CompletableFuture returnFuture) { + + CompletableFuture createMultipartUploadFuture = + multipartUploadHelper.createMultipartUpload(putObjectRequest, returnFuture); + + createMultipartUploadFuture.whenComplete((createMultipartUploadResponse, throwable) -> { + if (throwable != null) { + genericMultipartHelper.handleException(returnFuture, () -> "Failed to initiate multipart upload", throwable); + } else { + log.debug(() -> "Initiated a new multipart upload, uploadId: " + createMultipartUploadResponse.uploadId()); + doUploadInParts(Pair.of(putObjectRequest, asyncRequestBody), contentLength, returnFuture, + createMultipartUploadResponse.uploadId()); + } + }); + } + + private void doUploadInParts(Pair request, + long contentLength, + CompletableFuture returnFuture, + String uploadId) { + + long optimalPartSize = genericMultipartHelper.calculateOptimalPartSizeFor(contentLength, partSizeInBytes); + int partCount = genericMultipartHelper.determinePartCount(contentLength, optimalPartSize); + if (optimalPartSize > partSizeInBytes) { + log.debug(() -> String.format("Configured partSize is %d, but using %d to prevent reaching maximum number of parts " + + "allowed", partSizeInBytes, optimalPartSize)); + } + + log.debug(() -> String.format("Starting multipart upload with partCount: %d, optimalPartSize: %d", partCount, + optimalPartSize)); + + MpuRequestContext mpuRequestContext = new MpuRequestContext(request, contentLength, optimalPartSize, uploadId); + + request.right() + .split(b -> b.chunkSizeInBytes(mpuRequestContext.partSize) + .bufferSizeInBytes(maxMemoryUsageInBytes)) + .subscribe(new KnownContentLengthAsyncRequestBodySubscriber(mpuRequestContext, + returnFuture)); + } + + private static final class MpuRequestContext { + private final Pair request; + private final long contentLength; + private final long partSize; + + private final String uploadId; + + private MpuRequestContext(Pair request, + 
long contentLength, + long partSize, + String uploadId) { + this.request = request; + this.contentLength = contentLength; + this.partSize = partSize; + this.uploadId = uploadId; + } + } + + private class KnownContentLengthAsyncRequestBodySubscriber implements Subscriber { + + /** + * The number of AsyncRequestBody has been received but yet to be processed + */ + private final AtomicInteger asyncRequestBodyInFlight = new AtomicInteger(0); + + /** + * Indicates whether CompleteMultipart has been initiated or not. + */ + private final AtomicBoolean completedMultipartInitiated = new AtomicBoolean(false); + + private final AtomicBoolean failureActionInitiated = new AtomicBoolean(false); + + private final AtomicInteger partNumber = new AtomicInteger(1); + + private final AtomicReferenceArray completedParts; + private final String uploadId; + private final Collection> futures = new ConcurrentLinkedQueue<>(); + + private final PutObjectRequest putObjectRequest; + private final CompletableFuture returnFuture; + private Subscription subscription; + + private volatile boolean isDone; + + KnownContentLengthAsyncRequestBodySubscriber(MpuRequestContext mpuRequestContext, + CompletableFuture returnFuture) { + long optimalPartSize = genericMultipartHelper.calculateOptimalPartSizeFor(mpuRequestContext.contentLength, + partSizeInBytes); + int partCount = genericMultipartHelper.determinePartCount(mpuRequestContext.contentLength, optimalPartSize); + this.putObjectRequest = mpuRequestContext.request.left(); + this.returnFuture = returnFuture; + this.completedParts = new AtomicReferenceArray<>(partCount); + this.uploadId = mpuRequestContext.uploadId; + } + + @Override + public void onSubscribe(Subscription s) { + if (this.subscription != null) { + log.warn(() -> "The subscriber has already been subscribed. 
Cancelling the incoming subscription"); + subscription.cancel(); + return; + } + this.subscription = s; + s.request(1); + returnFuture.whenComplete((r, t) -> { + if (t != null) { + s.cancel(); + multipartUploadHelper.cancelingOtherOngoingRequests(futures, t); + } + }); + } + + @Override + public void onNext(AsyncRequestBody asyncRequestBody) { + log.trace(() -> "Received asyncRequestBody " + asyncRequestBody.contentLength()); + asyncRequestBodyInFlight.incrementAndGet(); + UploadPartRequest uploadRequest = + SdkPojoConversionUtils.toUploadPartRequest(putObjectRequest, + partNumber.getAndIncrement(), + uploadId); + + Consumer completedPartConsumer = completedPart -> completedParts.set(completedPart.partNumber() - 1, + completedPart); + multipartUploadHelper.sendIndividualUploadPartRequest(uploadId, completedPartConsumer, futures, + Pair.of(uploadRequest, asyncRequestBody)) + .whenComplete((r, t) -> { + if (t != null) { + if (failureActionInitiated.compareAndSet(false, true)) { + multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, + putObjectRequest); + } + } else { + completeMultipartUploadIfFinish(asyncRequestBodyInFlight.decrementAndGet()); + } + }); + subscription.request(1); + } + + @Override + public void onError(Throwable t) { + log.debug(() -> "Received onError ", t); + if (failureActionInitiated.compareAndSet(false, true)) { + multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, putObjectRequest); + } + } + + @Override + public void onComplete() { + log.debug(() -> "Received onComplete()"); + isDone = true; + completeMultipartUploadIfFinish(asyncRequestBodyInFlight.get()); + } + + private void completeMultipartUploadIfFinish(int requestsInFlight) { + if (isDone && requestsInFlight == 0 && completedMultipartInitiated.compareAndSet(false, true)) { + CompletedPart[] parts = + IntStream.range(0, completedParts.length()) + .mapToObj(completedParts::get) + .toArray(CompletedPart[]::new); + 
multipartUploadHelper.completeMultipartUpload(returnFuture, uploadId, parts, putObjectRequest); + } + } + + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithUnknownContentLengthHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithUnknownContentLengthHelper.java new file mode 100644 index 00000000000..0c8c3c70b51 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithUnknownContentLengthHelper.java @@ -0,0 +1,249 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + + +import java.util.Collection; +import java.util.Comparator; +import java.util.Queue; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.utils.CompletableFutureUtils; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Pair; + +/** + * An internal helper class that uploads streams with unknown content length. 
+ */ +@SdkInternalApi +public final class UploadWithUnknownContentLengthHelper { + private static final Logger log = Logger.loggerFor(UploadWithUnknownContentLengthHelper.class); + + private final S3AsyncClient s3AsyncClient; + private final long partSizeInBytes; + private final GenericMultipartHelper genericMultipartHelper; + + private final long maxMemoryUsageInBytes; + private final long multipartUploadThresholdInBytes; + + private final MultipartUploadHelper multipartUploadHelper; + + public UploadWithUnknownContentLengthHelper(S3AsyncClient s3AsyncClient, + long partSizeInBytes, + long multipartUploadThresholdInBytes, + long maxMemoryUsageInBytes) { + this.s3AsyncClient = s3AsyncClient; + this.partSizeInBytes = partSizeInBytes; + this.genericMultipartHelper = new GenericMultipartHelper<>(s3AsyncClient, + SdkPojoConversionUtils::toAbortMultipartUploadRequest, + SdkPojoConversionUtils::toPutObjectResponse); + this.maxMemoryUsageInBytes = maxMemoryUsageInBytes; + this.multipartUploadThresholdInBytes = multipartUploadThresholdInBytes; + this.multipartUploadHelper = new MultipartUploadHelper(s3AsyncClient, partSizeInBytes, multipartUploadThresholdInBytes, + maxMemoryUsageInBytes); + } + + public CompletableFuture uploadObject(PutObjectRequest putObjectRequest, + AsyncRequestBody asyncRequestBody) { + CompletableFuture returnFuture = new CompletableFuture<>(); + + SdkPublisher splitAsyncRequestBodyResponse = + asyncRequestBody.split(b -> b.chunkSizeInBytes(partSizeInBytes) + .bufferSizeInBytes(maxMemoryUsageInBytes)); + + splitAsyncRequestBodyResponse.subscribe(new UnknownContentLengthAsyncRequestBodySubscriber(partSizeInBytes, + putObjectRequest, + returnFuture)); + return returnFuture; + } + + private class UnknownContentLengthAsyncRequestBodySubscriber implements Subscriber { + /** + * Indicates whether this is the first async request body or not. 
+ */ + private final AtomicBoolean isFirstAsyncRequestBody = new AtomicBoolean(true); + + /** + * Indicates whether CreateMultipartUpload has been initiated or not + */ + private final AtomicBoolean createMultipartUploadInitiated = new AtomicBoolean(false); + + /** + * Indicates whether CompleteMultipart has been initiated or not. + */ + private final AtomicBoolean completedMultipartInitiated = new AtomicBoolean(false); + + /** + * The number of AsyncRequestBody has been received but yet to be processed + */ + private final AtomicInteger asyncRequestBodyInFlight = new AtomicInteger(0); + + private final AtomicBoolean failureActionInitiated = new AtomicBoolean(false); + + private AtomicInteger partNumber = new AtomicInteger(1); + + private final Queue completedParts = new ConcurrentLinkedQueue<>(); + private final Collection> futures = new ConcurrentLinkedQueue<>(); + + private final CompletableFuture uploadIdFuture = new CompletableFuture<>(); + + private final long maximumChunkSizeInByte; + private final PutObjectRequest putObjectRequest; + private final CompletableFuture returnFuture; + private Subscription subscription; + private AsyncRequestBody firstRequestBody; + + private String uploadId; + private volatile boolean isDone; + + UnknownContentLengthAsyncRequestBodySubscriber(long maximumChunkSizeInByte, + PutObjectRequest putObjectRequest, + CompletableFuture returnFuture) { + this.maximumChunkSizeInByte = maximumChunkSizeInByte; + this.putObjectRequest = putObjectRequest; + this.returnFuture = returnFuture; + } + + @Override + public void onSubscribe(Subscription s) { + if (this.subscription != null) { + log.warn(() -> "The subscriber has already been subscribed. 
Cancelling the incoming subscription"); + subscription.cancel(); + return; + } + this.subscription = s; + s.request(1); + returnFuture.whenComplete((r, t) -> { + if (t != null) { + s.cancel(); + multipartUploadHelper.cancelingOtherOngoingRequests(futures, t); + } + }); + } + + @Override + public void onNext(AsyncRequestBody asyncRequestBody) { + log.trace(() -> "Received asyncRequestBody " + asyncRequestBody.contentLength()); + asyncRequestBodyInFlight.incrementAndGet(); + + if (isFirstAsyncRequestBody.compareAndSet(true, false)) { + log.trace(() -> "Received first async request body"); + // If this is the first AsyncRequestBody received, request another one because we don't know if there is more + firstRequestBody = asyncRequestBody; + subscription.request(1); + return; + } + + // If there are more than 1 AsyncRequestBodies, then we know we need to upload this + // object using MPU + if (createMultipartUploadInitiated.compareAndSet(false, true)) { + log.debug(() -> "Starting the upload as multipart upload request"); + CompletableFuture createMultipartUploadFuture = + multipartUploadHelper.createMultipartUpload(putObjectRequest, returnFuture); + + createMultipartUploadFuture.whenComplete((createMultipartUploadResponse, throwable) -> { + if (throwable != null) { + genericMultipartHelper.handleException(returnFuture, () -> "Failed to initiate multipart upload", + throwable); + subscription.cancel(); + } else { + uploadId = createMultipartUploadResponse.uploadId(); + log.debug(() -> "Initiated a new multipart upload, uploadId: " + uploadId); + + sendUploadPartRequest(uploadId, firstRequestBody); + sendUploadPartRequest(uploadId, asyncRequestBody); + + // We need to complete the uploadIdFuture *after* the first two requests have been sent + uploadIdFuture.complete(uploadId); + } + }); + CompletableFutureUtils.forwardExceptionTo(returnFuture, createMultipartUploadFuture); + } else { + uploadIdFuture.whenComplete((r, t) -> { + sendUploadPartRequest(uploadId, 
asyncRequestBody); + }); + } + } + + private void sendUploadPartRequest(String uploadId, AsyncRequestBody asyncRequestBody) { + multipartUploadHelper.sendIndividualUploadPartRequest(uploadId, completedParts::add, futures, + uploadPart(asyncRequestBody)) + .whenComplete((r, t) -> { + if (t != null) { + if (failureActionInitiated.compareAndSet(false, true)) { + multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, putObjectRequest); + } + } else { + completeMultipartUploadIfFinish(asyncRequestBodyInFlight.decrementAndGet()); + } + }); + synchronized (this) { + subscription.request(1); + }; + } + + private Pair uploadPart(AsyncRequestBody asyncRequestBody) { + UploadPartRequest uploadRequest = + SdkPojoConversionUtils.toUploadPartRequest(putObjectRequest, + partNumber.getAndIncrement(), + uploadId); + return Pair.of(uploadRequest, asyncRequestBody); + } + + @Override + public void onError(Throwable t) { + log.debug(() -> "Received onError() ", t); + if (failureActionInitiated.compareAndSet(false, true)) { + multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, putObjectRequest); + } + } + + @Override + public void onComplete() { + log.debug(() -> "Received onComplete()"); + // If CreateMultipartUpload has not been initiated at this point, we know this is a single object upload + if (createMultipartUploadInitiated.get() == false) { + log.debug(() -> "Starting the upload as a single object upload request"); + multipartUploadHelper.uploadInOneChunk(putObjectRequest, firstRequestBody, returnFuture); + } else { + isDone = true; + completeMultipartUploadIfFinish(asyncRequestBodyInFlight.get()); + } + } + + private void completeMultipartUploadIfFinish(int requestsInFlight) { + if (isDone && requestsInFlight == 0 && completedMultipartInitiated.compareAndSet(false, true)) { + CompletedPart[] parts = completedParts.stream() + .sorted(Comparator.comparingInt(CompletedPart::partNumber)) + .toArray(CompletedPart[]::new); + 
multipartUploadHelper.completeMultipartUpload(returnFuture, uploadId, parts, putObjectRequest); + } + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java new file mode 100644 index 00000000000..28e418974db --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java @@ -0,0 +1,199 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.multipart; + +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; +import software.amazon.awssdk.services.s3.model.CopyObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * Class that hold configuration properties related to multipart operation for a {@link S3AsyncClient}. 
Passing this class to the + * {@link S3AsyncClientBuilder#multipartConfiguration(MultipartConfiguration)} will enable automatic conversion of + * {@link S3AsyncClient#putObject(Consumer, AsyncRequestBody)}, {@link S3AsyncClient#copyObject(CopyObjectRequest)} to their + * respective multipart operations. + *

    + * Note: The multipart operation for {@link S3AsyncClient#getObject(GetObjectRequest, AsyncResponseTransformer)} is + * temporarily disabled and will result in throwing an {@link UnsupportedOperationException} if called when configured for + * multipart operation. + */ +@SdkPublicApi +public final class MultipartConfiguration implements ToCopyableBuilder { + + private final Long thresholdInBytes; + private final Long minimumPartSizeInBytes; + private final Long apiCallBufferSizeInBytes; + + private MultipartConfiguration(DefaultMultipartConfigBuilder builder) { + this.thresholdInBytes = builder.thresholdInBytes; + this.minimumPartSizeInBytes = builder.minimumPartSizeInBytes; + this.apiCallBufferSizeInBytes = builder.apiCallBufferSizeInBytes; + } + + public static Builder builder() { + return new DefaultMultipartConfigBuilder(); + } + + @Override + public Builder toBuilder() { + return builder() + .apiCallBufferSizeInBytes(apiCallBufferSizeInBytes) + .minimumPartSizeInBytes(minimumPartSizeInBytes) + .thresholdInBytes(thresholdInBytes); + } + + /** + * Indicates the value of the configured threshold, in bytes. Any request whose size is less than the configured value will + * not use multipart operation. + * @return the value of the configured threshold. + */ + public Long thresholdInBytes() { + return this.thresholdInBytes; + } + + /** + * Indicates the size, in bytes, of each individual part of the part requests. The actual part size used might be bigger to + * conform to the maximum number of parts allowed per multipart request. + * @return the value of the configured part size. + */ + public Long minimumPartSizeInBytes() { + return this.minimumPartSizeInBytes; + } + + /** + * The maximum memory, in bytes, that the SDK will use to buffer request content into memory. + * @return the value of the configured maximum memory usage. 
+ */ + public Long apiCallBufferSizeInBytes() { + return this.apiCallBufferSizeInBytes; + } + + /** + * Builder for a {@link MultipartConfiguration}. + */ + public interface Builder extends CopyableBuilder { + + /** + * Configures the minimum number of bytes of the body of the request required for requests to be converted to their + * multipart equivalent. Only taken into account when converting {@code putObject} and {@code copyObject} requests. + * Any request whose size is less than the configured value will not use multipart operation, + * even if multipart is enabled via {@link S3AsyncClientBuilder#multipartEnabled(Boolean)}. + *

    + * + * Default value: 8 MiB + * + * @param thresholdInBytes the value of the threshold to set. + * @return an instance of this builder. + */ + Builder thresholdInBytes(Long thresholdInBytes); + + /** + * Indicates the value of the configured threshold. + * @return the value of the threshold. + */ + Long thresholdInBytes(); + + /** + * Configures the part size, in bytes, to be used in each individual part request. + * Only used for putObject and copyObject operations. + *

    + * When uploading a large payload, the size of the payload of each individual part request might actually be + * bigger than + * the configured value since there is a limit to the maximum number of parts possible per multipart request. If the + * configured part size would lead to a number of parts higher than the maximum allowed, a larger part size will be + * calculated instead to allow fewer parts to be uploaded, to avoid the limit imposed on the maximum number of parts. + *

    + * In the case where the {@code minimumPartSizeInBytes} is set to a value higher than the {@code thresholdInBytes}, when + * the client receives a request with a size smaller than a single part, multipart operation will NOT be performed + * even if the size of the request is larger than the threshold. + *

    + * Default value: 8 MiB + * + * @param minimumPartSizeInBytes the value of the part size to set + * @return an instance of this builder. + */ + Builder minimumPartSizeInBytes(Long minimumPartSizeInBytes); + + /** + * Indicates the value of the configured part size. + * @return the value of the part size + */ + Long minimumPartSizeInBytes(); + + /** + * Configures the maximum amount of memory, in bytes, the SDK will use to buffer content of requests in memory. + * Increasing this value may lead to better performance at the cost of using more memory. + *

    + * Default value: If not specified, the SDK will use the equivalent of four parts worth of memory, so 32 Mib by default. + * + * @param apiCallBufferSizeInBytes the value of the maximum memory usage. + * @return an instance of this builder. + */ + Builder apiCallBufferSizeInBytes(Long apiCallBufferSizeInBytes); + + /** + * Indicates the value of the maximum memory usage that the SDK will use. + * @return the value of the maximum memory usage. + */ + Long apiCallBufferSizeInBytes(); + } + + private static class DefaultMultipartConfigBuilder implements Builder { + private Long thresholdInBytes; + private Long minimumPartSizeInBytes; + private Long apiCallBufferSizeInBytes; + + public Builder thresholdInBytes(Long thresholdInBytes) { + this.thresholdInBytes = thresholdInBytes; + return this; + } + + public Long thresholdInBytes() { + return this.thresholdInBytes; + } + + public Builder minimumPartSizeInBytes(Long minimumPartSizeInBytes) { + this.minimumPartSizeInBytes = minimumPartSizeInBytes; + return this; + } + + public Long minimumPartSizeInBytes() { + return this.minimumPartSizeInBytes; + } + + @Override + public Builder apiCallBufferSizeInBytes(Long maximumMemoryUsageInBytes) { + this.apiCallBufferSizeInBytes = maximumMemoryUsageInBytes; + return this; + } + + @Override + public Long apiCallBufferSizeInBytes() { + return apiCallBufferSizeInBytes; + } + + @Override + public MultipartConfiguration build() { + return new MultipartConfiguration(this); + } + } +} diff --git a/services/s3/src/main/resources/codegen-resources/customization.config b/services/s3/src/main/resources/codegen-resources/customization.config index 1a1efb76c5f..f33272a2a63 100644 --- a/services/s3/src/main/resources/codegen-resources/customization.config +++ b/services/s3/src/main/resources/codegen-resources/customization.config @@ -236,8 +236,15 @@ "syncClientDecorator": "software.amazon.awssdk.services.s3.internal.client.S3SyncClientDecorator", "asyncClientDecorator": 
"software.amazon.awssdk.services.s3.internal.client.S3AsyncClientDecorator", "useGlobalEndpoint": true, + "multipartCustomization": { + "multipartConfigurationClass": "software.amazon.awssdk.services.s3.multipart.MultipartConfiguration", + "multipartConfigMethodDoc": "Configuration for multipart operation of this client.", + "multipartEnableMethodDoc": "Enables automatic conversion of put and copy method to their equivalent multipart operation.", + "contextParamEnabledKey": "S3AsyncClientDecorator.MULTIPART_ENABLED_KEY", + "contextParamConfigKey": "S3AsyncClientDecorator.MULTIPART_CONFIGURATION_KEY" + }, "interceptors": [ - "software.amazon.awssdk.services.s3.internal.handlers.PutObjectInterceptor", + "software.amazon.awssdk.services.s3.internal.handlers.StreamingRequestInterceptor", "software.amazon.awssdk.services.s3.internal.handlers.CreateBucketInterceptor", "software.amazon.awssdk.services.s3.internal.handlers.CreateMultipartUploadRequestInterceptor", "software.amazon.awssdk.services.s3.internal.handlers.EnableChunkedEncodingInterceptor", diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java index 0d0e681c364..bd5c34f9104 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyObjectHelperTest.java @@ -24,7 +24,6 @@ import static org.mockito.Mockito.when; import java.util.List; -import java.util.Random; import java.util.concurrent.CompletableFuture; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -34,6 +33,7 @@ import org.mockito.stubbing.Answer; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.services.s3.S3AsyncClient; +import 
software.amazon.awssdk.services.s3.internal.multipart.CopyObjectHelper; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; @@ -48,10 +48,7 @@ import software.amazon.awssdk.services.s3.model.NoSuchBucketException; import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; import software.amazon.awssdk.services.s3.model.UploadPartCopyResponse; -import software.amazon.awssdk.utils.BinaryUtils; import software.amazon.awssdk.utils.CompletableFutureUtils; -import software.amazon.awssdk.utils.Md5Utils; - class CopyObjectHelperTest { private static final String SOURCE_BUCKET = "source"; @@ -64,10 +61,13 @@ class CopyObjectHelperTest { private S3AsyncClient s3AsyncClient; private CopyObjectHelper copyHelper; + private static final long PART_SIZE = 1024L; + private static final long UPLOAD_THRESHOLD = PART_SIZE * 2; + @BeforeEach public void setUp() { s3AsyncClient = Mockito.mock(S3AsyncClient.class); - copyHelper = new CopyObjectHelper(s3AsyncClient, PART_SIZE_BYTES); + copyHelper = new CopyObjectHelper(s3AsyncClient, PART_SIZE, UPLOAD_THRESHOLD); } @Test @@ -119,6 +119,25 @@ void singlePartCopy_happyCase_shouldSucceed() { assertThat(future.join()).isEqualTo(expectedResponse); } + @Test + void copy_doesNotExceedThreshold_shouldUseSingleObjectCopy() { + + CopyObjectRequest copyObjectRequest = copyObjectRequest(); + + stubSuccessfulHeadObjectCall(2000L); + + CopyObjectResponse expectedResponse = CopyObjectResponse.builder().build(); + CompletableFuture copyFuture = + CompletableFuture.completedFuture(expectedResponse); + + when(s3AsyncClient.copyObject(copyObjectRequest)).thenReturn(copyFuture); + + CompletableFuture future = + copyHelper.copyObject(copyObjectRequest); + + assertThat(future.join()).isEqualTo(expectedResponse); + } + @Test void 
multiPartCopy_fourPartsHappyCase_shouldSucceed() { CopyObjectRequest copyObjectRequest = copyObjectRequest(); @@ -180,7 +199,7 @@ void multiPartCopy_onePartFailed_shouldFailOtherPartsAndAbort() { CompletableFuture future = copyHelper.copyObject(copyObjectRequest); - assertThatThrownBy(future::join).hasMessageContaining("Failed to send multipart copy requests").hasRootCause(exception); + assertThatThrownBy(future::join).hasMessageContaining("Failed to send multipart requests").hasRootCause(exception); verify(s3AsyncClient, never()).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); @@ -218,7 +237,7 @@ void multiPartCopy_completeMultipartFailed_shouldFailAndAbort() { CompletableFuture future = copyHelper.copyObject(copyObjectRequest); - assertThatThrownBy(future::join).hasMessageContaining("Failed to send multipart copy requests").hasRootCause(exception); + assertThatThrownBy(future::join).hasMessageContaining("Failed to send multipart requests").hasRootCause(exception); ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(AbortMultipartUploadRequest.class); verify(s3AsyncClient).abortMultipartUpload(argumentCaptor.capture()); @@ -261,6 +280,7 @@ void multiPartCopy_contentSizeExceeds10000Parts_shouldAdjustPartSize() { } } + @Test public void multiPartCopy_sseCHeadersSetInOriginalRequest_includedInCompleteMultipart() { String customerAlgorithm = "algorithm"; @@ -271,7 +291,7 @@ public void multiPartCopy_sseCHeadersSetInOriginalRequest_includedInCompleteMult .sseCustomerKey(customerKey) .sseCustomerKeyMD5(customerKeyMd5)); - stubSuccessfulHeadObjectCall(2 * PART_SIZE_BYTES); + stubSuccessfulHeadObjectCall(3 * PART_SIZE_BYTES); stubSuccessfulCreateMulipartCall(); stubSuccessfulUploadPartCopyCalls(); stubSuccessfulCompleteMultipartCall(); diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClientTest.java 
b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClientTest.java index f1eb68e1693..e1d04a03eb6 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClientTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/S3CrtAsyncHttpClientTest.java @@ -316,9 +316,10 @@ void build_shouldPassThroughParameters() { S3NativeClientConfiguration.builder() .maxConcurrency(100) .signingRegion("us-west-2") - .standardRetryOptions( - new StandardRetryOptions() - .withBackoffRetryOptions(new ExponentialBackoffRetryOptions().withMaxRetries(7))) + .thresholdInBytes(1024L) + .standardRetryOptions( + new StandardRetryOptions() + .withBackoffRetryOptions(new ExponentialBackoffRetryOptions().withMaxRetries(7))) .httpConfiguration(S3CrtHttpConfiguration.builder() .connectionTimeout(Duration.ofSeconds(1)) .connectionHealthConfiguration(c -> c.minimumThroughputInBps(1024L) @@ -330,6 +331,7 @@ void build_shouldPassThroughParameters() { (S3CrtAsyncHttpClient) S3CrtAsyncHttpClient.builder().s3ClientConfiguration(configuration).build(); S3ClientOptions clientOptions = client.s3ClientOptions(); assertThat(clientOptions.getConnectTimeoutMs()).isEqualTo(1000); + assertThat(clientOptions.getMultiPartUploadThreshold()).isEqualTo(1024); assertThat(clientOptions.getStandardRetryOptions().getBackoffRetryOptions().getMaxRetries()).isEqualTo(7); assertThat(clientOptions.getMaxConnections()).isEqualTo(100); assertThat(clientOptions.getMonitoringOptions()).satisfies(options -> { @@ -347,6 +349,20 @@ void build_shouldPassThroughParameters() { assertThat(clientOptions.getMaxConnections()).isEqualTo(100); } + @Test + void build_partSizeConfigured_shouldApplyToThreshold() { + long partSizeInBytes = 10L; + S3NativeClientConfiguration configuration = + S3NativeClientConfiguration.builder() + .partSizeInBytes(partSizeInBytes) + .build(); + S3CrtAsyncHttpClient client = + (S3CrtAsyncHttpClient) 
S3CrtAsyncHttpClient.builder().s3ClientConfiguration(configuration).build(); + S3ClientOptions clientOptions = client.s3ClientOptions(); + assertThat(clientOptions.getPartSize()).isEqualTo(partSizeInBytes); + assertThat(clientOptions.getMultiPartUploadThreshold()).isEqualTo(clientOptions.getPartSize()); + } + @Test void build_nullHttpConfiguration() { S3NativeClientConfiguration configuration = diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptorTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/StreamingRequestInterceptorTest.java similarity index 74% rename from services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptorTest.java rename to services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/StreamingRequestInterceptorTest.java index 08dce580bc9..3a8e9537147 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/PutObjectInterceptorTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/handlers/StreamingRequestInterceptorTest.java @@ -23,9 +23,10 @@ import software.amazon.awssdk.http.SdkHttpRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; -public class PutObjectInterceptorTest { - private final PutObjectInterceptor interceptor = new PutObjectInterceptor(); +public class StreamingRequestInterceptorTest { + private final StreamingRequestInterceptor interceptor = new StreamingRequestInterceptor(); @Test public void modifyHttpRequest_setsExpect100Continue_whenSdkRequestIsPutObject() { @@ -36,6 +37,16 @@ public void modifyHttpRequest_setsExpect100Continue_whenSdkRequestIsPutObject() assertThat(modifiedRequest.firstMatchingHeader("Expect")).hasValue("100-continue"); } + @Test + 
public void modifyHttpRequest_setsExpect100Continue_whenSdkRequestIsUploadPart() { + + final SdkHttpRequest modifiedRequest = + interceptor.modifyHttpRequest(modifyHttpRequestContext(UploadPartRequest.builder().build()), + new ExecutionAttributes()); + + assertThat(modifiedRequest.firstMatchingHeader("Expect")).hasValue("100-continue"); + } + @Test public void modifyHttpRequest_doesNotSetExpect_whenSdkRequestIsNotPutObject() { diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuTestUtils.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuTestUtils.java new file mode 100644 index 00000000000..435d5b40618 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MpuTestUtils.java @@ -0,0 +1,65 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectResponse; + +public final class MpuTestUtils { + + private MpuTestUtils() { + } + + public static void stubSuccessfulHeadObjectCall(long contentLength, S3AsyncClient s3AsyncClient) { + CompletableFuture headFuture = + CompletableFuture.completedFuture(HeadObjectResponse.builder() + .contentLength(contentLength) + .build()); + + when(s3AsyncClient.headObject(any(HeadObjectRequest.class))) + .thenReturn(headFuture); + } + + public static void stubSuccessfulCreateMultipartCall(String mpuId, S3AsyncClient s3AsyncClient) { + CompletableFuture createMultipartUploadFuture = + CompletableFuture.completedFuture(CreateMultipartUploadResponse.builder() + .uploadId(mpuId) + .build()); + + when(s3AsyncClient.createMultipartUpload(any(CreateMultipartUploadRequest.class))) + .thenReturn(createMultipartUploadFuture); + } + + public static void stubSuccessfulCompleteMultipartCall(String bucket, String key, S3AsyncClient s3AsyncClient) { + CompletableFuture completeMultipartUploadFuture = + CompletableFuture.completedFuture(CompleteMultipartUploadResponse.builder() + .bucket(bucket) + .key(key) + .build()); + + when(s3AsyncClient.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))) + .thenReturn(completeMultipartUploadFuture); + } +} diff --git 
a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartClientUserAgentTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartClientUserAgentTest.java new file mode 100644 index 00000000000..0f41c7c78e7 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartClientUserAgentTest.java @@ -0,0 +1,82 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.ApiName; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; + +class MultipartClientUserAgentTest { + private MockAsyncHttpClient mockAsyncHttpClient; + private UserAgentInterceptor userAgentInterceptor; + private S3AsyncClient s3Client; + + @BeforeEach + void init() { + this.mockAsyncHttpClient = new MockAsyncHttpClient(); + this.userAgentInterceptor = new UserAgentInterceptor(); + s3Client = S3AsyncClient.builder() + .httpClient(mockAsyncHttpClient) + .endpointOverride(URI.create("http://localhost")) + .overrideConfiguration(c -> c.addExecutionInterceptor(userAgentInterceptor)) + .multipartConfiguration(c -> c.minimumPartSizeInBytes(512L).thresholdInBytes(512L)) + .multipartEnabled(true) + .region(Region.US_EAST_1) + .build(); + } + + @AfterEach + void reset() { + this.mockAsyncHttpClient.reset(); + } + + @Test + void validateUserAgent_nonMultipartMethod() throws Exception { + HttpExecuteResponse response = HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + mockAsyncHttpClient.stubResponses(response); + + s3Client.headObject(req -> req.key("mock").bucket("mock")).get(); + + assertThat(userAgentInterceptor.apiNames) + 
.anyMatch(api -> "hll".equals(api.name()) && "s3Multipart".equals(api.version())); + } + + private static final class UserAgentInterceptor implements ExecutionInterceptor { + private final List apiNames = new ArrayList<>(); + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + context.request().overrideConfiguration().ifPresent(c -> apiNames.addAll(c.apiNames())); + } + } + +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/S3MultipartClientBuilderTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/S3MultipartClientBuilderTest.java new file mode 100644 index 00000000000..510d441c4ca --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/S3MultipartClientBuilderTest.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; + +class S3MultipartClientBuilderTest { + + @Test + void multipartEnabledWithConfig_shouldBuildMultipartClient() { + S3AsyncClient client = S3AsyncClient.builder() + .multipartEnabled(true) + .multipartConfiguration(MultipartConfiguration.builder().build()) + .region(Region.US_EAST_1) + .build(); + assertThat(client).isInstanceOf(MultipartS3AsyncClient.class); + } + + @Test + void multipartEnabledWithoutConfig_shouldBuildMultipartClient() { + S3AsyncClient client = S3AsyncClient.builder() + .multipartEnabled(true) + .region(Region.US_EAST_1) + .build(); + assertThat(client).isInstanceOf(MultipartS3AsyncClient.class); + } + + @Test + void multipartDisabledWithConfig_shouldNotBuildMultipartClient() { + S3AsyncClient client = S3AsyncClient.builder() + .multipartEnabled(false) + .multipartConfiguration(b -> b.apiCallBufferSizeInBytes(1024L)) + .region(Region.US_EAST_1) + .build(); + assertThat(client).isNotInstanceOf(MultipartS3AsyncClient.class); + } + + @Test + void noMultipart_shouldNotBeMultipartClient() { + S3AsyncClient client = S3AsyncClient.builder() + .region(Region.US_EAST_1) + .build(); + assertThat(client).isNotInstanceOf(MultipartS3AsyncClient.class); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyRequestConversionUtilsTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtilsTest.java similarity index 61% rename from services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyRequestConversionUtilsTest.java rename to 
services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtilsTest.java index 94071ad115f..4d5a333a51d 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/crt/CopyRequestConversionUtilsTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/SdkPojoConversionUtilsTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. */ -package software.amazon.awssdk.services.s3.internal.crt; +package software.amazon.awssdk.services.s3.internal.multipart; import static org.assertj.core.api.Assertions.assertThat; @@ -35,6 +35,7 @@ import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.services.s3.internal.multipart.SdkPojoConversionUtils; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompletedPart; @@ -43,19 +44,23 @@ import software.amazon.awssdk.services.s3.model.CopyPartResult; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.s3.model.S3ResponseMetadata; import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.utils.Logger; -class CopyRequestConversionUtilsTest { - private static final Logger log = Logger.loggerFor(CopyRequestConversionUtils.class); +class SdkPojoConversionUtilsTest { + private static final Logger log = 
Logger.loggerFor(SdkPojoConversionUtils.class); private static final Random RNG = new Random(); @Test void toHeadObject_shouldCopyProperties() { CopyObjectRequest randomCopyObject = randomCopyObjectRequest(); - HeadObjectRequest convertedToHeadObject = CopyRequestConversionUtils.toHeadObjectRequest(randomCopyObject); + HeadObjectRequest convertedToHeadObject = SdkPojoConversionUtils.toHeadObjectRequest(randomCopyObject); Set fieldsToIgnore = new HashSet<>(Arrays.asList("ExpectedBucketOwner", "RequestPayer", "Bucket", @@ -69,12 +74,12 @@ void toHeadObject_shouldCopyProperties() { } @Test - void toCompletedPart_shouldCopyProperties() { + void toCompletedPart_copy_shouldCopyProperties() { CopyPartResult.Builder fromObject = CopyPartResult.builder(); setFieldsToRandomValues(fromObject.sdkFields(), fromObject); CopyPartResult result = fromObject.build(); - CompletedPart convertedCompletedPart = CopyRequestConversionUtils.toCompletedPart(result, 1); + CompletedPart convertedCompletedPart = SdkPojoConversionUtils.toCompletedPart(result, 1); verifyFieldsAreCopied(result, convertedCompletedPart, new HashSet<>(), CopyPartResult.builder().sdkFields(), CompletedPart.builder().sdkFields()); @@ -82,9 +87,9 @@ void toCompletedPart_shouldCopyProperties() { } @Test - void toCreateMultipartUploadRequest_shouldCopyProperties() { + void toCreateMultipartUploadRequest_copyObject_shouldCopyProperties() { CopyObjectRequest randomCopyObject = randomCopyObjectRequest(); - CreateMultipartUploadRequest convertedRequest = CopyRequestConversionUtils.toCreateMultipartUploadRequest(randomCopyObject); + CreateMultipartUploadRequest convertedRequest = SdkPojoConversionUtils.toCreateMultipartUploadRequest(randomCopyObject); Set fieldsToIgnore = new HashSet<>(); verifyFieldsAreCopied(randomCopyObject, convertedRequest, fieldsToIgnore, CopyObjectRequest.builder().sdkFields(), @@ -100,7 +105,7 @@ void toCopyObjectResponse_shouldCopyProperties() { 
responseBuilder.responseMetadata(s3ResponseMetadata).sdkHttpResponse(sdkHttpFullResponse); CompleteMultipartUploadResponse result = responseBuilder.build(); - CopyObjectResponse convertedRequest = CopyRequestConversionUtils.toCopyObjectResponse(result); + CopyObjectResponse convertedRequest = SdkPojoConversionUtils.toCopyObjectResponse(result); Set fieldsToIgnore = new HashSet<>(); verifyFieldsAreCopied(result, convertedRequest, fieldsToIgnore, CompleteMultipartUploadResponse.builder().sdkFields(), @@ -111,29 +116,91 @@ void toCopyObjectResponse_shouldCopyProperties() { } @Test - void toAbortMultipartUploadRequest_shouldCopyProperties() { + void toAbortMultipartUploadRequest_copyObject_shouldCopyProperties() { CopyObjectRequest randomCopyObject = randomCopyObjectRequest(); - AbortMultipartUploadRequest convertedRequest = CopyRequestConversionUtils.toAbortMultipartUploadRequest(randomCopyObject, - "id"); + AbortMultipartUploadRequest convertedRequest = SdkPojoConversionUtils.toAbortMultipartUploadRequest(randomCopyObject).build(); Set fieldsToIgnore = new HashSet<>(); verifyFieldsAreCopied(randomCopyObject, convertedRequest, fieldsToIgnore, CopyObjectRequest.builder().sdkFields(), AbortMultipartUploadRequest.builder().sdkFields()); + } - assertThat(convertedRequest.uploadId()).isEqualTo("id"); + @Test + void toAbortMultipartUploadRequest_putObject_shouldCopyProperties() { + PutObjectRequest randomCopyObject = randomPutObjectRequest(); + AbortMultipartUploadRequest convertedRequest = SdkPojoConversionUtils.toAbortMultipartUploadRequest(randomCopyObject).build(); + Set fieldsToIgnore = new HashSet<>(); + verifyFieldsAreCopied(randomCopyObject, convertedRequest, fieldsToIgnore, + PutObjectRequest.builder().sdkFields(), + AbortMultipartUploadRequest.builder().sdkFields()); } @Test void toUploadPartCopyRequest_shouldCopyProperties() { CopyObjectRequest randomCopyObject = randomCopyObjectRequest(); - UploadPartCopyRequest convertedObject = 
CopyRequestConversionUtils.toUploadPartCopyRequest(randomCopyObject, 1, "id", - "bytes=0-1024"); + UploadPartCopyRequest convertedObject = SdkPojoConversionUtils.toUploadPartCopyRequest(randomCopyObject, 1, "id", + "bytes=0-1024"); Set fieldsToIgnore = new HashSet<>(Collections.singletonList("CopySource")); verifyFieldsAreCopied(randomCopyObject, convertedObject, fieldsToIgnore, CopyObjectRequest.builder().sdkFields(), UploadPartCopyRequest.builder().sdkFields()); } + @Test + void toUploadPartRequest_shouldCopyProperties() { + PutObjectRequest randomObject = randomPutObjectRequest(); + UploadPartRequest convertedObject = SdkPojoConversionUtils.toUploadPartRequest(randomObject, 1, "id"); + Set fieldsToIgnore = new HashSet<>(Arrays.asList("ChecksumCRC32", "ChecksumSHA256", "ContentMD5", "ChecksumSHA1", + "ChecksumCRC32C")); + verifyFieldsAreCopied(randomObject, convertedObject, fieldsToIgnore, + PutObjectRequest.builder().sdkFields(), + UploadPartRequest.builder().sdkFields()); + assertThat(convertedObject.partNumber()).isEqualTo(1); + assertThat(convertedObject.uploadId()).isEqualTo("id"); + } + + @Test + void toPutObjectResponse_shouldCopyProperties() { + CompleteMultipartUploadResponse.Builder builder = CompleteMultipartUploadResponse.builder(); + populateFields(builder); + S3ResponseMetadata s3ResponseMetadata = S3ResponseMetadata.create(DefaultAwsResponseMetadata.create(new HashMap<>())); + SdkHttpFullResponse sdkHttpFullResponse = SdkHttpFullResponse.builder().statusCode(200).build(); + builder.responseMetadata(s3ResponseMetadata).sdkHttpResponse(sdkHttpFullResponse); + CompleteMultipartUploadResponse randomObject = builder.build(); + PutObjectResponse convertedObject = SdkPojoConversionUtils.toPutObjectResponse(randomObject); + Set fieldsToIgnore = new HashSet<>(); + verifyFieldsAreCopied(randomObject, convertedObject, fieldsToIgnore, + CompleteMultipartUploadResponse.builder().sdkFields(), + PutObjectResponse.builder().sdkFields()); + + 
assertThat(convertedObject.sdkHttpResponse()).isEqualTo(sdkHttpFullResponse); + assertThat(convertedObject.responseMetadata()).isEqualTo(s3ResponseMetadata); + } + + @Test + void toCreateMultipartUploadRequest_putObjectRequest_shouldCopyProperties() { + PutObjectRequest randomObject = randomPutObjectRequest(); + CreateMultipartUploadRequest convertedObject = SdkPojoConversionUtils.toCreateMultipartUploadRequest(randomObject); + Set fieldsToIgnore = new HashSet<>(); + System.out.println(convertedObject); + verifyFieldsAreCopied(randomObject, convertedObject, fieldsToIgnore, + PutObjectRequest.builder().sdkFields(), + CreateMultipartUploadRequest.builder().sdkFields()); + } + + @Test + void toCompletedPart_putObject_shouldCopyProperties() { + UploadPartResponse.Builder fromObject = UploadPartResponse.builder(); + setFieldsToRandomValues(fromObject.sdkFields(), fromObject); + UploadPartResponse result = fromObject.build(); + + CompletedPart convertedCompletedPart = SdkPojoConversionUtils.toCompletedPart(result, 1); + verifyFieldsAreCopied(result, convertedCompletedPart, new HashSet<>(), + UploadPartResponse.builder().sdkFields(), + CompletedPart.builder().sdkFields()); + assertThat(convertedCompletedPart.partNumber()).isEqualTo(1); + } + private static void verifyFieldsAreCopied(SdkPojo requestConvertedFrom, SdkPojo requestConvertedTo, Set fieldsToIgnore, @@ -148,7 +215,7 @@ private static void verifyFieldsAreCopied(SdkPojo requestConvertedFrom, SdkField toField = toObjectEntry.getValue(); if (fieldsToIgnore.contains(toField.memberName())) { - log.info(() -> "Ignoring fields: " + toField.locationName()); + log.info(() -> "Ignoring fields: " + toField.memberName()); continue; } @@ -156,7 +223,7 @@ private static void verifyFieldsAreCopied(SdkPojo requestConvertedFrom, if (fromField == null) { log.info(() -> String.format("Ignoring field [%s] because the object to convert from does not have such field ", - toField.locationName())); + toField.memberName())); continue; } 
@@ -176,6 +243,16 @@ private CopyObjectRequest randomCopyObjectRequest() { return builder.build(); } + private PutObjectRequest randomPutObjectRequest() { + PutObjectRequest.Builder builder = PutObjectRequest.builder(); + setFieldsToRandomValues(builder.sdkFields(), builder); + return builder.build(); + } + + private void populateFields(SdkPojo pojo) { + setFieldsToRandomValues(pojo.sdkFields(), pojo); + } + private void setFieldsToRandomValues(Collection> fields, Object builder) { for (SdkField f : fields) { setFieldToRandomValue(f, builder); @@ -194,6 +271,8 @@ private static void setFieldToRandomValue(SdkField sdkField, Object obj) { sdkField.set(obj, new HashMap<>()); } else if (targetClass.equals(Boolean.class)) { sdkField.set(obj, true); + } else if (targetClass.equals(Long.class)) { + sdkField.set(obj, randomLong()); } else { throw new IllegalArgumentException("Unknown SdkField type: " + targetClass + " name: " + sdkField.memberName()); } @@ -202,7 +281,7 @@ private static void setFieldToRandomValue(SdkField sdkField, Object obj) { private static Map> sdkFieldMap(Collection> sdkFields) { Map> map = new HashMap<>(sdkFields.size()); for (SdkField f : sdkFields) { - String locName = f.locationName(); + String locName = f.memberName(); if (map.put(locName, f) != null) { throw new IllegalArgumentException("Multiple SdkFields map to same location name"); } @@ -217,4 +296,8 @@ private static Instant randomInstant() { private static Integer randomInteger() { return RNG.nextInt(); } + + private static long randomLong() { + return RNG.nextLong(); + } } diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java new file mode 100644 index 00000000000..11d54a73fb7 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java @@ -0,0 
+1,412 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.services.s3.internal.multipart.MpuTestUtils.stubSuccessfulCompleteMultipartCall; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import 
org.mockito.stubbing.OngoingStubbing; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import software.amazon.awssdk.testutils.RandomTempFile; +import software.amazon.awssdk.utils.CompletableFutureUtils; + +public class UploadObjectHelperTest { + + private static final String BUCKET = "bucket"; + private static final String KEY = "key"; + private static final long PART_SIZE = 8 * 1024; + + // Should contain four parts: [8KB, 8KB, 8KB, 1KB] + private static final long MPU_CONTENT_SIZE = 25 * 1024; + private static final long THRESHOLD = 10 * 1024; + private static final String UPLOAD_ID = "1234"; + + private static RandomTempFile testFile; + private UploadObjectHelper uploadHelper; + private S3AsyncClient s3AsyncClient; + + @BeforeAll + public static void beforeAll() throws IOException { + testFile = new RandomTempFile("testfile.dat", MPU_CONTENT_SIZE); + } + + @AfterAll + public static void afterAll() throws Exception { + testFile.delete(); + } + + public static Stream asyncRequestBody() { + return Stream.of(new 
UnknownContentLengthAsyncRequestBody(AsyncRequestBody.fromFile(testFile)), + AsyncRequestBody.fromFile(testFile)); + } + + @BeforeEach + public void beforeEach() { + s3AsyncClient = Mockito.mock(S3AsyncClient.class); + uploadHelper = new UploadObjectHelper(s3AsyncClient, PART_SIZE, THRESHOLD, PART_SIZE * 2); + } + + @ParameterizedTest + @ValueSource(longs = {THRESHOLD, PART_SIZE, THRESHOLD - 1, PART_SIZE - 1}) + void uploadObject_contentLengthDoesNotExceedThresholdAndPartSize_shouldUploadInOneChunk(long contentLength) { + PutObjectRequest putObjectRequest = putObjectRequest(contentLength); + AsyncRequestBody asyncRequestBody = Mockito.mock(AsyncRequestBody.class); + + CompletableFuture completedFuture = + CompletableFuture.completedFuture(PutObjectResponse.builder().build()); + when(s3AsyncClient.putObject(putObjectRequest, asyncRequestBody)).thenReturn(completedFuture); + uploadHelper.uploadObject(putObjectRequest, asyncRequestBody).join(); + Mockito.verify(s3AsyncClient).putObject(putObjectRequest, asyncRequestBody); + } + + @ParameterizedTest + @ValueSource(longs = {PART_SIZE, PART_SIZE - 1}) + void uploadObject_unKnownContentLengthDoesNotExceedPartSize_shouldUploadInOneChunk(long contentLength) { + PutObjectRequest putObjectRequest = putObjectRequest(contentLength); + AsyncRequestBody asyncRequestBody = + new UnknownContentLengthAsyncRequestBody(AsyncRequestBody.fromBytes(RandomStringUtils.randomAscii(Math.toIntExact(contentLength)) + .getBytes(StandardCharsets.UTF_8))); + + CompletableFuture completedFuture = + CompletableFuture.completedFuture(PutObjectResponse.builder().build()); + when(s3AsyncClient.putObject(putObjectRequest, asyncRequestBody)).thenReturn(completedFuture); + uploadHelper.uploadObject(putObjectRequest, asyncRequestBody).join(); + Mockito.verify(s3AsyncClient).putObject(putObjectRequest, asyncRequestBody); + } + + @ParameterizedTest + @MethodSource("asyncRequestBody") + void 
uploadObject_contentLengthExceedThresholdAndPartSize_shouldUseMPU(AsyncRequestBody asyncRequestBody) { + PutObjectRequest putObjectRequest = putObjectRequest(null); + + MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + stubSuccessfulUploadPartCalls(); + stubSuccessfulCompleteMultipartCall(BUCKET, KEY, s3AsyncClient); + + uploadHelper.uploadObject(putObjectRequest, asyncRequestBody).join(); + ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(UploadPartRequest.class); + ArgumentCaptor requestBodyArgumentCaptor = ArgumentCaptor.forClass(AsyncRequestBody.class); + verify(s3AsyncClient, times(4)).uploadPart(requestArgumentCaptor.capture(), + requestBodyArgumentCaptor.capture()); + + List actualRequests = requestArgumentCaptor.getAllValues(); + List actualRequestBodies = requestBodyArgumentCaptor.getAllValues(); + assertThat(actualRequestBodies).hasSize(4); + assertThat(actualRequests).hasSize(4); + + for (int i = 0; i < actualRequests.size(); i++) { + UploadPartRequest request = actualRequests.get(i); + AsyncRequestBody requestBody = actualRequestBodies.get(i); + assertThat(request.partNumber()).isEqualTo( i + 1); + assertThat(request.bucket()).isEqualTo(BUCKET); + assertThat(request.key()).isEqualTo(KEY); + + if (i == actualRequests.size() - 1) { + assertThat(requestBody.contentLength()).hasValue(1024L); + } else{ + assertThat(requestBody.contentLength()).hasValue(PART_SIZE); + } + } + } + + /** + * The second part failed, it should cancel ongoing part(first part). 
+ */ + @ParameterizedTest + @MethodSource("asyncRequestBody") + void mpu_onePartFailed_shouldFailOtherPartsAndAbort(AsyncRequestBody asyncRequestBody) { + PutObjectRequest putObjectRequest = putObjectRequest(MPU_CONTENT_SIZE); + + MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + CompletableFuture ongoingRequest = new CompletableFuture<>(); + + SdkClientException exception = SdkClientException.create("request failed"); + + OngoingStubbing> ongoingStubbing = + when(s3AsyncClient.uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class))).thenReturn(ongoingRequest); + + stubFailedUploadPartCalls(ongoingStubbing, exception); + + when(s3AsyncClient.abortMultipartUpload(any(AbortMultipartUploadRequest.class))) + .thenReturn(CompletableFuture.completedFuture(AbortMultipartUploadResponse.builder().build())); + + CompletableFuture future = uploadHelper.uploadObject(putObjectRequest, + asyncRequestBody); + + assertThatThrownBy(future::join).hasMessageContaining("Failed to send multipart upload requests").hasRootCause(exception); + + verify(s3AsyncClient, never()).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); + + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(AbortMultipartUploadRequest.class); + verify(s3AsyncClient).abortMultipartUpload(argumentCaptor.capture()); + AbortMultipartUploadRequest actualRequest = argumentCaptor.getValue(); + assertThat(actualRequest.uploadId()).isEqualTo(UPLOAD_ID); + + try { + ongoingRequest.get(1, TimeUnit.MILLISECONDS); + fail("no exception thrown"); + } catch (Exception e) { + assertThat(e.getCause()).hasMessageContaining("Failed to send multipart upload requests").hasRootCause(exception); + } + } + + /** + * This test is not parameterized because for unknown content length, the progress is nondeterministic. For example, we + * don't know if it has created multipart upload when we cancel the future. 
+ */ + @Test + void upload_knownContentLengthCancelResponseFuture_shouldCancelCreateMultipart() { + PutObjectRequest putObjectRequest = putObjectRequest(null); + + CompletableFuture createMultipartFuture = new CompletableFuture<>(); + + when(s3AsyncClient.createMultipartUpload(any(CreateMultipartUploadRequest.class))) + .thenReturn(createMultipartFuture); + + CompletableFuture future = + uploadHelper.uploadObject(putObjectRequest, AsyncRequestBody.fromFile(testFile)); + + future.cancel(true); + + assertThat(createMultipartFuture).isCancelled(); + } + + @Test + void upload_knownContentLengthCancelResponseFuture_shouldCancelUploadPart() { + PutObjectRequest putObjectRequest = putObjectRequest(null); + + CompletableFuture createMultipartFuture = new CompletableFuture<>(); + + MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + + CompletableFuture ongoingRequest = new CompletableFuture<>(); + + when(s3AsyncClient.uploadPart(any(UploadPartRequest.class), + any(AsyncRequestBody.class))).thenReturn(ongoingRequest); + + CompletableFuture future = + uploadHelper.uploadObject(putObjectRequest, AsyncRequestBody.fromFile(testFile)); + + future.cancel(true); + + assertThat(ongoingRequest).isCancelled(); + } + + @ParameterizedTest + @MethodSource("asyncRequestBody") + void uploadObject_createMultipartUploadFailed_shouldFail(AsyncRequestBody asyncRequestBody) { + PutObjectRequest putObjectRequest = putObjectRequest(null); + + SdkClientException exception = SdkClientException.create("CompleteMultipartUpload failed"); + + CompletableFuture createMultipartUploadFuture = + CompletableFutureUtils.failedFuture(exception); + + when(s3AsyncClient.createMultipartUpload(any(CreateMultipartUploadRequest.class))) + .thenReturn(createMultipartUploadFuture); + + CompletableFuture future = uploadHelper.uploadObject(putObjectRequest, + asyncRequestBody); + assertThatThrownBy(future::join).hasMessageContaining("Failed to initiate multipart upload") + 
.hasRootCause(exception); + } + + @ParameterizedTest + @MethodSource("asyncRequestBody") + void uploadObject_completeMultipartFailed_shouldFailAndAbort(AsyncRequestBody asyncRequestBody) { + PutObjectRequest putObjectRequest = putObjectRequest(null); + + MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + stubSuccessfulUploadPartCalls(); + + SdkClientException exception = SdkClientException.create("CompleteMultipartUpload failed"); + + CompletableFuture completeMultipartUploadFuture = + CompletableFutureUtils.failedFuture(exception); + + when(s3AsyncClient.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))) + .thenReturn(completeMultipartUploadFuture); + + when(s3AsyncClient.abortMultipartUpload(any(AbortMultipartUploadRequest.class))) + .thenReturn(CompletableFuture.completedFuture(AbortMultipartUploadResponse.builder().build())); + + CompletableFuture future = uploadHelper.uploadObject(putObjectRequest, + asyncRequestBody); + assertThatThrownBy(future::join).hasMessageContaining("Failed to send multipart requests") + .hasRootCause(exception); + } + + @ParameterizedTest() + @ValueSource(booleans = {false, true}) + void uploadObject_requestBodyOnError_shouldFailAndAbort(boolean contentLengthKnown) { + PutObjectRequest putObjectRequest = putObjectRequest(null); + Exception exception = new RuntimeException("error"); + + Long contentLength = contentLengthKnown ? 
MPU_CONTENT_SIZE : null; + ErroneousAsyncRequestBody erroneousAsyncRequestBody = + new ErroneousAsyncRequestBody(contentLength, exception); + MpuTestUtils.stubSuccessfulCreateMultipartCall(UPLOAD_ID, s3AsyncClient); + stubSuccessfulUploadPartCalls(); + + when(s3AsyncClient.abortMultipartUpload(any(AbortMultipartUploadRequest.class))) + .thenReturn(CompletableFuture.completedFuture(AbortMultipartUploadResponse.builder().build())); + + CompletableFuture future = uploadHelper.uploadObject(putObjectRequest, + erroneousAsyncRequestBody); + assertThatThrownBy(future::join).hasMessageContaining("Failed to send multipart upload requests") + .hasRootCause(exception); + } + + private static PutObjectRequest putObjectRequest(Long contentLength) { + return PutObjectRequest.builder() + .bucket(BUCKET) + .key(KEY) + .contentLength(contentLength) + .build(); + } + + private void stubSuccessfulUploadPartCalls() { + when(s3AsyncClient.uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class))) + .thenAnswer(new Answer>() { + int numberOfCalls = 0; + + @Override + public CompletableFuture answer(InvocationOnMock invocationOnMock) { + AsyncRequestBody AsyncRequestBody = invocationOnMock.getArgument(1); + // Draining the request body + AsyncRequestBody.subscribe(b -> {}); + + numberOfCalls++; + return CompletableFuture.completedFuture(UploadPartResponse.builder() + .checksumCRC32("crc" + numberOfCalls) + .build()); + } + }); + } + + private OngoingStubbing> stubFailedUploadPartCalls(OngoingStubbing> stubbing, Exception exception) { + return stubbing.thenAnswer(new Answer>() { + + @Override + public CompletableFuture answer(InvocationOnMock invocationOnMock) { + AsyncRequestBody AsyncRequestBody = invocationOnMock.getArgument(1); + // Draining the request body + AsyncRequestBody.subscribe(b -> {}); + + return CompletableFutureUtils.failedFuture(exception); + } + }); + } + + private static class UnknownContentLengthAsyncRequestBody implements AsyncRequestBody { + private 
final AsyncRequestBody delegate; + private volatile boolean cancelled; + + public UnknownContentLengthAsyncRequestBody(AsyncRequestBody asyncRequestBody) { + this.delegate = asyncRequestBody; + } + + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + delegate.subscribe(s); + } + } + + private static class ErroneousAsyncRequestBody implements AsyncRequestBody { + private volatile boolean isDone; + private final Long contentLength; + private final Exception exception; + + private ErroneousAsyncRequestBody(Long contentLength, Exception exception) { + this.contentLength = contentLength; + this.exception = exception; + } + + @Override + public Optional contentLength() { + return Optional.ofNullable(contentLength); + } + + + @Override + public void subscribe(Subscriber s) { + s.onSubscribe(new Subscription() { + @Override + public void request(long n) { + if (isDone) { + return; + } + isDone = true; + s.onNext(ByteBuffer.wrap(RandomStringUtils.randomAscii(Math.toIntExact(PART_SIZE)).getBytes(StandardCharsets.UTF_8))); + s.onNext(ByteBuffer.wrap(RandomStringUtils.randomAscii(Math.toIntExact(PART_SIZE)).getBytes(StandardCharsets.UTF_8))); + s.onError(exception); + + } + + @Override + public void cancel() { + } + }); + + } + } +} diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index 874a05216ee..17e829b4330 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index de684da6402..1bf58c4535e 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/sagemaker/pom.xml 
b/services/sagemaker/pom.xml index b9edf54d313..5a015027b4f 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/sagemaker/src/main/resources/codegen-resources/endpoint-rule-set.json index 9d96d35d2d6..c7c60ddc19f 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/sagemaker/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] 
} ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.sagemaker-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://api.sagemaker-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,180 +225,140 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws", { "fn": 
"getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://api-fips.sagemaker.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://api-fips.sagemaker.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://api.sagemaker-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://api-fips.sagemaker.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.sagemaker.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://api-fips.sagemaker.{Region}.amazonaws.com", + 
"properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://api.sagemaker-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://api.sagemaker.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://api.sagemaker.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://api.sagemaker.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index 91ea56331fa..ab6de9858fc 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -2124,6 +2124,19 @@ 
"output":{"shape":"GetSagemakerServicecatalogPortfolioStatusOutput"}, "documentation":"

    Gets the status of Service Catalog in SageMaker. Service Catalog is used to create SageMaker projects.

    " }, + "GetScalingConfigurationRecommendation":{ + "name":"GetScalingConfigurationRecommendation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetScalingConfigurationRecommendationRequest"}, + "output":{"shape":"GetScalingConfigurationRecommendationResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

    Starts an Amazon SageMaker Inference Recommender autoscaling recommendation job. Returns recommendations for autoscaling policies that you can apply to your SageMaker endpoint.

    " + }, "GetSearchSuggestions":{ "name":"GetSearchSuggestions", "http":{ @@ -5626,7 +5639,7 @@ }, "EndTimeOffset":{ "shape":"MonitoringTimeOffsetString", - "documentation":"

    If specified, monitoring jobs substract this time from the end time. For information about using offsets for scheduling monitoring jobs, see Schedule Model Quality Monitoring Jobs.

    " + "documentation":"

    If specified, monitoring jobs subtract this time from the end time. For information about using offsets for scheduling monitoring jobs, see Schedule Model Quality Monitoring Jobs.

    " } }, "documentation":"

    Input object for the batch transform job.

    " @@ -8426,8 +8439,8 @@ ], "members":{ "ModelCardName":{ - "shape":"EntityName", - "documentation":"

    The name of the model card to export.

    " + "shape":"ModelCardNameOrArn", + "documentation":"

    The name or Amazon Resource Name (ARN) of the model card to export.

    " }, "ModelCardVersion":{ "shape":"Integer", @@ -9650,6 +9663,24 @@ "min":1, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*)${1,256}" }, + "CustomizedMetricSpecification":{ + "type":"structure", + "members":{ + "MetricName":{ + "shape":"String", + "documentation":"

    The name of the customized metric.

    " + }, + "Namespace":{ + "shape":"String", + "documentation":"

    The namespace of the customized metric.

    " + }, + "Statistic":{ + "shape":"Statistic", + "documentation":"

    The statistic of the customized metric.

    " + } + }, + "documentation":"

    A customized metric.

    " + }, "DataCaptureConfig":{ "type":"structure", "required":[ @@ -12052,7 +12083,7 @@ "DataCaptureConfig":{"shape":"DataCaptureConfigSummary"}, "EndpointStatus":{ "shape":"EndpointStatus", - "documentation":"

    The status of the endpoint.

    • OutOfService: Endpoint is not available to take incoming requests.

    • Creating: CreateEndpoint is executing.

    • Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.

    • SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or deleted or re-scaled until it has completed. This maintenance operation does not change any customer-specified values such as VPC config, KMS encryption, model, instance type, or instance count.

    • RollingBack: Endpoint fails to scale up or down or change its variant weight and is in the process of rolling back to its previous configuration. Once the rollback completes, endpoint returns to an InService status. This transitional status only applies to an endpoint that has autoscaling enabled and is undergoing variant weight or capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called explicitly.

    • InService: Endpoint is available to process incoming requests.

    • Deleting: DeleteEndpoint is executing.

    • Failed: Endpoint could not be created, updated, or re-scaled. Use the FailureReason value returned by DescribeEndpoint for information about the failure. DeleteEndpoint is the only operation that can be performed on a failed endpoint.

    " + "documentation":"

    The status of the endpoint.

    • OutOfService: Endpoint is not available to take incoming requests.

    • Creating: CreateEndpoint is executing.

    • Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.

    • SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or deleted or re-scaled until it has completed. This maintenance operation does not change any customer-specified values such as VPC config, KMS encryption, model, instance type, or instance count.

    • RollingBack: Endpoint fails to scale up or down or change its variant weight and is in the process of rolling back to its previous configuration. Once the rollback completes, endpoint returns to an InService status. This transitional status only applies to an endpoint that has autoscaling enabled and is undergoing variant weight or capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called explicitly.

    • InService: Endpoint is available to process incoming requests.

    • Deleting: DeleteEndpoint is executing.

    • Failed: Endpoint could not be created, updated, or re-scaled. Use the FailureReason value returned by DescribeEndpoint for information about the failure. DeleteEndpoint is the only operation that can be performed on a failed endpoint.

    • UpdateRollbackFailed: Both the rolling deployment and auto-rollback failed. Your endpoint is in service with a mix of the old and new endpoint configurations. For information about how to remedy this issue and restore the endpoint's status to InService, see Rolling Deployments.

    " }, "FailureReason":{ "shape":"FailureReason", @@ -13205,7 +13236,7 @@ }, "ModelCardName":{ "shape":"EntityName", - "documentation":"

    The name of the model card that the model export job exports.

    " + "documentation":"

    The name or Amazon Resource Name (ARN) of the model card that the model export job exports.

    " }, "ModelCardVersion":{ "shape":"Integer", @@ -13238,8 +13269,8 @@ "required":["ModelCardName"], "members":{ "ModelCardName":{ - "shape":"EntityName", - "documentation":"

    The name of the model card to describe.

    " + "shape":"ModelCardNameOrArn", + "documentation":"

    The name or Amazon Resource Name (ARN) of the model card to describe.

    " }, "ModelCardVersion":{ "shape":"Integer", @@ -15279,6 +15310,7 @@ "Delete_Failed" ] }, + "Double":{"type":"double"}, "DoubleParameterValue":{"type":"double"}, "DriftCheckBaselines":{ "type":"structure", @@ -15362,6 +15394,32 @@ }, "documentation":"

    Represents the drift check model quality baselines that can be used when the model monitor is set using the model package.

    " }, + "DynamicScalingConfiguration":{ + "type":"structure", + "members":{ + "MinCapacity":{ + "shape":"Integer", + "documentation":"

    The recommended minimum capacity to specify for your autoscaling policy.

    " + }, + "MaxCapacity":{ + "shape":"Integer", + "documentation":"

    The recommended maximum capacity to specify for your autoscaling policy.

    " + }, + "ScaleInCooldown":{ + "shape":"Integer", + "documentation":"

    The recommended scale in cooldown time for your autoscaling policy.

    " + }, + "ScaleOutCooldown":{ + "shape":"Integer", + "documentation":"

    The recommended scale out cooldown time for your autoscaling policy.

    " + }, + "ScalingPolicies":{ + "shape":"ScalingPolicies", + "documentation":"

    An object of the scaling policies for each metric.

    " + } + }, + "documentation":"

    An object with the recommended values for you to specify when creating an autoscaling policy.

    " + }, "EMRStepMetadata":{ "type":"structure", "members":{ @@ -16880,6 +16938,13 @@ "max":40, "min":0 }, + "FlatInvocations":{ + "type":"string", + "enum":[ + "Continue", + "Stop" + ] + }, "Float":{"type":"float"}, "FlowDefinitionArn":{ "type":"string", @@ -17142,6 +17207,65 @@ } } }, + "GetScalingConfigurationRecommendationRequest":{ + "type":"structure", + "required":["InferenceRecommendationsJobName"], + "members":{ + "InferenceRecommendationsJobName":{ + "shape":"RecommendationJobName", + "documentation":"

    The name of a previously completed Inference Recommender job.

    " + }, + "RecommendationId":{ + "shape":"String", + "documentation":"

    The recommendation ID of a previously completed inference recommendation. This ID should come from one of the recommendations returned by the job specified in the InferenceRecommendationsJobName field.

    Specify either this field or the EndpointName field.

    " + }, + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

    The name of an endpoint benchmarked during a previously completed inference recommendation job. This name should come from one of the recommendations returned by the job specified in the InferenceRecommendationsJobName field.

    Specify either this field or the RecommendationId field.

    " + }, + "TargetCpuUtilizationPerCore":{ + "shape":"UtilizationPercentagePerCore", + "documentation":"

    The percentage of how much utilization you want an instance to use before autoscaling. The default value is 50%.

    " + }, + "ScalingPolicyObjective":{ + "shape":"ScalingPolicyObjective", + "documentation":"

    An object where you specify the anticipated traffic pattern for an endpoint.

    " + } + } + }, + "GetScalingConfigurationRecommendationResponse":{ + "type":"structure", + "members":{ + "InferenceRecommendationsJobName":{ + "shape":"RecommendationJobName", + "documentation":"

    The name of a previously completed Inference Recommender job.

    " + }, + "RecommendationId":{ + "shape":"String", + "documentation":"

    The recommendation ID of a previously completed inference recommendation.

    " + }, + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

    The name of an endpoint benchmarked during a previously completed Inference Recommender job.

    " + }, + "TargetCpuUtilizationPerCore":{ + "shape":"UtilizationPercentagePerCore", + "documentation":"

    The percentage of how much utilization you want an instance to use before autoscaling, which you specified in the request. The default value is 50%.

    " + }, + "ScalingPolicyObjective":{ + "shape":"ScalingPolicyObjective", + "documentation":"

    An object representing the anticipated traffic pattern for an endpoint that you specified in the request.

    " + }, + "Metric":{ + "shape":"ScalingPolicyMetric", + "documentation":"

    An object with a list of metrics that were benchmarked during the previously completed Inference Recommender job.

    " + }, + "DynamicScalingConfiguration":{ + "shape":"DynamicScalingConfiguration", + "documentation":"

    An object with the recommended values for you to specify when creating an autoscaling policy.

    " + } + } + }, "GetSearchSuggestionsRequest":{ "type":"structure", "required":["Resource"], @@ -21896,8 +22020,8 @@ "documentation":"

    The maximum number of model card versions to list.

    " }, "ModelCardName":{ - "shape":"EntityName", - "documentation":"

    List model card versions for the model card with the specified name.

    " + "shape":"ModelCardNameOrArn", + "documentation":"

    List model card versions for the model card with the specified name or Amazon Resource Name (ARN).

    " }, "ModelCardStatus":{ "shape":"ModelCardStatus", @@ -23797,6 +23921,21 @@ "Test" ] }, + "MetricSpecification":{ + "type":"structure", + "members":{ + "Predefined":{ + "shape":"PredefinedMetricSpecification", + "documentation":"

    Information about a predefined metric.

    " + }, + "Customized":{ + "shape":"CustomizedMetricSpecification", + "documentation":"

    Information about a customized metric.

    " + } + }, + "documentation":"

    An object containing information about a metric.

    ", + "union":true + }, "MetricValue":{"type":"float"}, "MetricsSource":{ "type":"structure", @@ -24113,6 +24252,12 @@ }, "documentation":"

    Configure the export output details for an Amazon SageMaker Model Card.

    " }, + "ModelCardNameOrArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:model-card/.*)?([a-zA-Z0-9](-*[a-zA-Z0-9]){0,62})" + }, "ModelCardProcessingStatus":{ "type":"string", "enum":[ @@ -24588,7 +24733,7 @@ "members":{ "Percentile":{ "shape":"String64", - "documentation":"

    The model latency percentile threshold.

    " + "documentation":"

    The model latency percentile threshold. For custom load tests, specify the value as P95.

    " }, "ValueInMilliseconds":{ "shape":"Integer", @@ -26433,6 +26578,10 @@ "max":9, "min":1 }, + "NumberOfSteps":{ + "type":"integer", + "min":1 + }, "ObjectiveStatus":{ "type":"string", "enum":[ @@ -26699,7 +26848,7 @@ }, "CompilerOptions":{ "shape":"CompilerOptions", - "documentation":"

    Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions.

    • DTYPE: Specifies the data type for the input. When compiling for ml_* (except for ml_inf) instances using PyTorch framework, provide the data type (dtype) of the model's input. \"float32\" is used if \"DTYPE\" is not specified. Options for data type are:

      • float32: Use either \"float\" or \"float32\".

      • int64: Use either \"int64\" or \"long\".

      For example, {\"dtype\" : \"float32\"}.

    • CPU: Compilation for CPU supports the following compiler options.

      • mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'}

      • mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']}

    • ARM: Details of ARM CPU compilations.

      • NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors.

        For example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM 32-bit platform with the NEON support.

    • NVIDIA: Compilation for NVIDIA GPU supports the following compiler options.

      • gpu_code: Specifies the targeted architecture.

      • trt-ver: Specifies the TensorRT versions in x.y.z. format.

      • cuda-ver: Specifies the CUDA version in x.y format.

      For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'}

    • ANDROID: Compilation for the Android OS supports the following compiler options:

      • ANDROID_PLATFORM: Specifies the Android API levels. Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}.

      • mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support.

    • INFERENTIA: Compilation for target ml_inf1 uses compiler options passed in as a JSON string. For example, \"CompilerOptions\": \"\\\"--verbose 1 --num-neuroncores 2 -O2\\\"\".

      For information about supported compiler options, see Neuron Compiler CLI.

    • CoreML: Compilation for the CoreML OutputConfig TargetDevice supports the following compiler options:

      • class_labels: Specifies the classification labels file name inside input tar.gz file. For example, {\"class_labels\": \"imagenet_labels_1000.txt\"}. Labels inside the txt file should be separated by newlines.

    • EIA: Compilation for the Elastic Inference Accelerator supports the following compiler options:

      • precision_mode: Specifies the precision of compiled artifacts. Supported values are \"FP16\" and \"FP32\". Default is \"FP32\".

      • signature_def_key: Specifies the signature to use for models in SavedModel format. Defaults is TensorFlow's default signature def key.

      • output_names: Specifies a list of output tensor names for models in FrozenGraph format. Set at most one API field, either: signature_def_key or output_names.

      For example: {\"precision_mode\": \"FP32\", \"output_names\": [\"output:0\"]}

    " + "documentation":"

    Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions.

    • DTYPE: Specifies the data type for the input. When compiling for ml_* (except for ml_inf) instances using PyTorch framework, provide the data type (dtype) of the model's input. \"float32\" is used if \"DTYPE\" is not specified. Options for data type are:

      • float32: Use either \"float\" or \"float32\".

      • int64: Use either \"int64\" or \"long\".

      For example, {\"dtype\" : \"float32\"}.

    • CPU: Compilation for CPU supports the following compiler options.

      • mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'}

      • mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']}

    • ARM: Details of ARM CPU compilations.

      • NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors.

        For example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM 32-bit platform with the NEON support.

    • NVIDIA: Compilation for NVIDIA GPU supports the following compiler options.

      • gpu_code: Specifies the targeted architecture.

      • trt-ver: Specifies the TensorRT versions in x.y.z. format.

      • cuda-ver: Specifies the CUDA version in x.y format.

      For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'}

    • ANDROID: Compilation for the Android OS supports the following compiler options:

      • ANDROID_PLATFORM: Specifies the Android API levels. Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}.

      • mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support.

    • INFERENTIA: Compilation for target ml_inf1 uses compiler options passed in as a JSON string. For example, \"CompilerOptions\": \"\\\"--verbose 1 --num-neuroncores 2 -O2\\\"\".

      For information about supported compiler options, see Neuron Compiler CLI Reference Guide.

    • CoreML: Compilation for the CoreML OutputConfig TargetDevice supports the following compiler options:

      • class_labels: Specifies the classification labels file name inside input tar.gz file. For example, {\"class_labels\": \"imagenet_labels_1000.txt\"}. Labels inside the txt file should be separated by newlines.

    • EIA: Compilation for the Elastic Inference Accelerator supports the following compiler options:

      • precision_mode: Specifies the precision of compiled artifacts. Supported values are \"FP16\" and \"FP32\". Default is \"FP32\".

      • signature_def_key: Specifies the signature to use for models in SavedModel format. Defaults is TensorFlow's default signature def key.

      • output_names: Specifies a list of output tensor names for models in FrozenGraph format. Set at most one API field, either: signature_def_key or output_names.

      For example: {\"precision_mode\": \"FP32\", \"output_names\": [\"output:0\"]}

    " }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -26983,7 +27132,7 @@ "members":{ "InitialNumberOfUsers":{ "shape":"InitialNumberOfUsers", - "documentation":"

    Specifies how many concurrent users to start with.

    " + "documentation":"

    Specifies how many concurrent users to start with. The value should be between 1 and 3.

    " }, "SpawnRate":{ "shape":"SpawnRate", @@ -26991,7 +27140,7 @@ }, "DurationInSeconds":{ "shape":"TrafficDurationInSeconds", - "documentation":"

    Specifies how long traffic phase should be.

    " + "documentation":"

    Specifies how long a traffic phase should be. For custom load tests, the value should be between 120 and 3600. This value should not exceed JobDurationInSeconds.

    " } }, "documentation":"

    Defines the traffic pattern.

    " @@ -27422,6 +27571,16 @@ "min":1, "pattern":".*" }, + "PredefinedMetricSpecification":{ + "type":"structure", + "members":{ + "PredefinedMetricType":{ + "shape":"String", + "documentation":"

    The metric type. You can only apply SageMaker metric types to SageMaker endpoints.

    " + } + }, + "documentation":"

    A specification for a predefined metric.

    " + }, "PresignedDomainUrl":{"type":"string"}, "ProbabilityThresholdAttribute":{"type":"double"}, "ProblemType":{ @@ -29054,7 +29213,7 @@ }, "JobDurationInSeconds":{ "shape":"JobDurationInSeconds", - "documentation":"

    Specifies the maximum duration of the job, in seconds.>

    " + "documentation":"

    Specifies the maximum duration of the job, in seconds. The maximum value is 7200.

    " }, "TrafficPattern":{ "shape":"TrafficPattern", @@ -29160,6 +29319,10 @@ "ModelLatencyThresholds":{ "shape":"ModelLatencyThresholds", "documentation":"

    The interval of time taken by a model to respond as viewed from SageMaker. The interval includes the local communication time taken to send the request and to fetch the response from the container of a model and the time taken to complete the inference in the container.

    " + }, + "FlatInvocations":{ + "shape":"FlatInvocations", + "documentation":"

    Stops a load test when the number of invocations (TPS) peaks and flattens, which means that the instance has reached capacity. The default value is Stop. If you want the load test to continue after invocations have flattened, set the value to Continue.

    " } }, "documentation":"

    Specifies conditions for stopping a job. When a job reaches a stopping condition limit, SageMaker ends the job.

    " @@ -29995,6 +30158,49 @@ "max":100, "min":0 }, + "ScalingPolicies":{ + "type":"list", + "member":{"shape":"ScalingPolicy"} + }, + "ScalingPolicy":{ + "type":"structure", + "members":{ + "TargetTracking":{ + "shape":"TargetTrackingScalingPolicyConfiguration", + "documentation":"

    A target tracking scaling policy. Includes support for predefined or customized metrics.

    " + } + }, + "documentation":"

    An object containing a recommended scaling policy.

    ", + "union":true + }, + "ScalingPolicyMetric":{ + "type":"structure", + "members":{ + "InvocationsPerInstance":{ + "shape":"Integer", + "documentation":"

    The number of invocations sent to a model, normalized by InstanceCount in each ProductionVariant. 1/numberOfInstances is sent as the value on each request, where numberOfInstances is the number of active instances for the ProductionVariant behind the endpoint at the time of the request.

    " + }, + "ModelLatency":{ + "shape":"Integer", + "documentation":"

    The interval of time taken by a model to respond as viewed from SageMaker. This interval includes the local communication times taken to send the request and to fetch the response from the container of a model and the time taken to complete the inference in the container.

    " + } + }, + "documentation":"

    The metric for a scaling policy.

    " + }, + "ScalingPolicyObjective":{ + "type":"structure", + "members":{ + "MinInvocationsPerMinute":{ + "shape":"Integer", + "documentation":"

    The minimum number of expected requests to your endpoint per minute.

    " + }, + "MaxInvocationsPerMinute":{ + "shape":"Integer", + "documentation":"

    The maximum number of expected requests to your endpoint per minute.

    " + } + }, + "documentation":"

    An object where you specify the anticipated traffic pattern for an endpoint.

    " + }, "ScheduleConfig":{ "type":"structure", "required":["ScheduleExpression"], @@ -30716,6 +30922,24 @@ "STOPPED" ] }, + "Stairs":{ + "type":"structure", + "members":{ + "DurationInSeconds":{ + "shape":"TrafficDurationInSeconds", + "documentation":"

    Defines how long each traffic step should be.

    " + }, + "NumberOfSteps":{ + "shape":"NumberOfSteps", + "documentation":"

    Specifies how many steps to perform during traffic.

    " + }, + "UsersPerStep":{ + "shape":"UsersPerStep", + "documentation":"

    Specifies how many new users to spawn in each step.

    " + } + }, + "documentation":"

    Defines the stairs traffic pattern for an Inference Recommender load test. This pattern type consists of multiple steps where the number of users increases at each step.

    Specify either the stairs or phases traffic pattern.

    " + }, "StartEdgeDeploymentStageRequest":{ "type":"structure", "required":[ @@ -30820,6 +31044,16 @@ } } }, + "Statistic":{ + "type":"string", + "enum":[ + "Average", + "Minimum", + "Maximum", + "SampleCount", + "Sum" + ] + }, "StatusDetails":{ "type":"string", "max":1024, @@ -31417,6 +31651,20 @@ "LINUX" ] }, + "TargetTrackingScalingPolicyConfiguration":{ + "type":"structure", + "members":{ + "MetricSpecification":{ + "shape":"MetricSpecification", + "documentation":"

    An object containing information about a metric.

    " + }, + "TargetValue":{ + "shape":"Double", + "documentation":"

    The recommended target value to specify for the metric when creating a scaling policy.

    " + } + }, + "documentation":"

    A target tracking scaling policy. Includes support for predefined or customized metrics.

    When using the PutScalingPolicy API, this parameter is required when you are creating a policy with the policy type TargetTrackingScaling.

    " + }, "TaskAvailabilityLifetimeInSeconds":{ "type":"integer", "min":60 @@ -31512,6 +31760,10 @@ }, "TextClassificationJobConfig":{ "type":"structure", + "required":[ + "ContentColumn", + "TargetLabelColumn" + ], "members":{ "CompletionCriteria":{ "shape":"AutoMLJobCompletionCriteria", @@ -31519,11 +31771,11 @@ }, "ContentColumn":{ "shape":"ContentColumn", - "documentation":"

    The name of the column used to provide the sentences to be classified. It should not be the same as the target column (Required).

    " + "documentation":"

    The name of the column used to provide the sentences to be classified. It should not be the same as the target column.

    " }, "TargetLabelColumn":{ "shape":"TargetLabelColumn", - "documentation":"

    The name of the column used to provide the class labels. It should not be same as the content column (Required).

    " + "documentation":"

    The name of the column used to provide the class labels. It should not be the same as the content column.

    " } }, "documentation":"

    Stores the configuration information for the text classification problem of an AutoML job V2.

    " @@ -31639,11 +31891,15 @@ "members":{ "TrafficType":{ "shape":"TrafficType", - "documentation":"

    Defines the traffic patterns.

    " + "documentation":"

    Defines the traffic patterns. Choose either PHASES or STAIRS.

    " }, "Phases":{ "shape":"Phases", "documentation":"

    Defines the phases traffic specification.

    " + }, + "Stairs":{ + "shape":"Stairs", + "documentation":"

    Defines the stairs traffic pattern.

    " } }, "documentation":"

    Defines the traffic pattern of the load test.

    " @@ -31684,7 +31940,10 @@ }, "TrafficType":{ "type":"string", - "enum":["PHASES"] + "enum":[ + "PHASES", + "STAIRS" + ] }, "TrainingContainerArgument":{ "type":"string", @@ -31804,7 +32063,8 @@ "ml.g5.48xlarge", "ml.trn1.2xlarge", "ml.trn1.32xlarge", - "ml.trn1n.32xlarge" + "ml.trn1n.32xlarge", + "ml.p5.48xlarge" ] }, "TrainingInstanceTypes":{ @@ -31938,6 +32198,7 @@ "shape":"DebugRuleEvaluationStatuses", "documentation":"

    Information about the evaluation status of the rules for the training job.

    " }, + "ProfilerConfig":{"shape":"ProfilerConfig"}, "Environment":{ "shape":"TrainingEnvironmentMap", "documentation":"

    The environment variables to set in the Docker container.

    " @@ -32343,7 +32604,8 @@ "Tags":{ "shape":"TagList", "documentation":"

    A list of tags associated with the transform job.

    " - } + }, + "DataCaptureConfig":{"shape":"BatchDataCaptureConfig"} }, "documentation":"

    A batch transform job. For information about SageMaker batch transform, see Use Batch Transform.

    " }, @@ -33635,8 +33897,8 @@ "required":["ModelCardName"], "members":{ "ModelCardName":{ - "shape":"EntityName", - "documentation":"

    The name of the model card to update.

    " + "shape":"ModelCardNameOrArn", + "documentation":"

    The name or Amazon Resource Name (ARN) of the model card to update.

    " }, "Content":{ "shape":"ModelCardContent", @@ -34328,10 +34590,20 @@ }, "documentation":"

    A collection of settings that apply to users of Amazon SageMaker Studio. These settings are specified when the CreateUserProfile API is called, and as DefaultUserSettings when the CreateDomain API is called.

    SecurityGroups is aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain.

    " }, + "UsersPerStep":{ + "type":"integer", + "max":3, + "min":1 + }, "UtilizationMetric":{ "type":"float", "min":0.0 }, + "UtilizationPercentagePerCore":{ + "type":"integer", + "max":100, + "min":1 + }, "ValidationFraction":{ "type":"float", "max":1, diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index b1038a9b091..b989ee0caeb 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index b7d2b021d15..e5be558fa38 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index 1ce9d53060a..aee2c052add 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature Store Runtime diff --git a/services/sagemakergeospatial/pom.xml b/services/sagemakergeospatial/pom.xml index 98be40f4b85..8dc297b4967 100644 --- a/services/sagemakergeospatial/pom.xml +++ b/services/sagemakergeospatial/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sagemakergeospatial AWS Java SDK :: Services :: Sage Maker Geospatial diff --git a/services/sagemakermetrics/pom.xml b/services/sagemakermetrics/pom.xml index c576551a8cd..ae3b905268f 100644 --- a/services/sagemakermetrics/pom.xml +++ b/services/sagemakermetrics/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sagemakermetrics AWS Java SDK :: Services :: Sage Maker Metrics diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index eaa2a17e99e..9805741fb18 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index 9a3fbbc271d..061a8bf1f80 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/scheduler/pom.xml b/services/scheduler/pom.xml index 48de37f1194..f7cc8545bbe 100644 --- a/services/scheduler/pom.xml +++ b/services/scheduler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT scheduler AWS Java SDK :: Services :: Scheduler diff --git a/services/scheduler/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/scheduler/src/main/resources/codegen-resources/endpoint-rule-set.json index 35335693868..4913426138f 100644 --- a/services/scheduler/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/scheduler/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,14 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - 
"ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -62,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -131,168 +111,238 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + }, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": 
"booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scheduler-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://scheduler-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scheduler-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + 
"conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://scheduler-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scheduler.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://scheduler.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scheduler.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not 
support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://scheduler.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/scheduler/src/main/resources/codegen-resources/service-2.json b/services/scheduler/src/main/resources/codegen-resources/service-2.json index 6c44b7c975d..f9dbfdb1ccb 100644 --- a/services/scheduler/src/main/resources/codegen-resources/service-2.json +++ b/services/scheduler/src/main/resources/codegen-resources/service-2.json @@ -231,6 +231,13 @@ } }, "shapes":{ + "ActionAfterCompletion":{ + "type":"string", + "enum":[ + "NONE", + "DELETE" + ] + }, "AssignPublicIp":{ "type":"string", "enum":[ @@ -356,6 +363,10 @@ "Target" ], "members":{ + "ActionAfterCompletion":{ + "shape":"ActionAfterCompletion", + "documentation":"

    Specifies the action that EventBridge Scheduler applies to the schedule after the schedule completes invoking the target.

    " + }, "ClientToken":{ "shape":"ClientToken", "documentation":"

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, EventBridge Scheduler uses a randomly generated token for the request to ensure idempotency.

    ", @@ -389,7 +400,7 @@ }, "ScheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"

    The expression that defines when the schedule runs. The following formats are supported.

    • at expression - at(yyyy-mm-ddThh:mm:ss)

    • rate expression - rate(unit value)

    • cron expression - cron(fields)

    You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.

    A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).

    A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days

    For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.

    " + "documentation":"

    The expression that defines when the schedule runs. The following formats are supported.

    • at expression - at(yyyy-mm-ddThh:mm:ss)

    • rate expression - rate(value unit)

    • cron expression - cron(fields)

    You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.

    A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).

    A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days

    For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.

    " }, "ScheduleExpressionTimezone":{ "shape":"ScheduleExpressionTimezone", @@ -670,6 +681,10 @@ "GetScheduleOutput":{ "type":"structure", "members":{ + "ActionAfterCompletion":{ + "shape":"ActionAfterCompletion", + "documentation":"

    Indicates the action that EventBridge Scheduler applies to the schedule after the schedule completes invoking the target.

    " + }, "Arn":{ "shape":"ScheduleArn", "documentation":"

    The Amazon Resource Name (ARN) of the schedule.

    " @@ -708,7 +723,7 @@ }, "ScheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"

    The expression that defines when the schedule runs. The following formats are supported.

    • at expression - at(yyyy-mm-ddThh:mm:ss)

    • rate expression - rate(unit value)

    • cron expression - cron(fields)

    You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.

    A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).

    A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days

    For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.

    " + "documentation":"

    The expression that defines when the schedule runs. The following formats are supported.

    • at expression - at(yyyy-mm-ddThh:mm:ss)

    • rate expression - rate(value unit)

    • cron expression - cron(fields)

    You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.

    A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).

    A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days

    For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.

    " }, "ScheduleExpressionTimezone":{ "shape":"ScheduleExpressionTimezone", @@ -1474,6 +1489,10 @@ "Target" ], "members":{ + "ActionAfterCompletion":{ + "shape":"ActionAfterCompletion", + "documentation":"

    Specifies the action that EventBridge Scheduler applies to the schedule after the schedule completes invoking the target.

    " + }, "ClientToken":{ "shape":"ClientToken", "documentation":"

    Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, EventBridge Scheduler uses a randomly generated token for the request to ensure idempotency.

    ", @@ -1507,7 +1526,7 @@ }, "ScheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"

    The expression that defines when the schedule runs. The following formats are supported.

    • at expression - at(yyyy-mm-ddThh:mm:ss)

    • rate expression - rate(unit value)

    • cron expression - cron(fields)

    You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.

    A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).

    A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days

    For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.

    " + "documentation":"

    The expression that defines when the schedule runs. The following formats are supported.

    • at expression - at(yyyy-mm-ddThh:mm:ss)

    • rate expression - rate(value unit)

    • cron expression - cron(fields)

    You can use at expressions to create one-time schedules that invoke a target once, at the time and in the time zone, that you specify. You can use rate and cron expressions to create recurring schedules. Rate-based schedules are useful when you want to invoke a target at regular intervals, such as every 15 minutes or every five days. Cron-based schedules are useful when you want to invoke a target periodically at a specific time, such as at 8:00 am (UTC+0) every 1st day of the month.

    A cron expression consists of six fields separated by white spaces: (minutes hours day_of_month month day_of_week year).

    A rate expression consists of a value as a positive integer, and a unit with the following options: minute | minutes | hour | hours | day | days

    For more information and examples, see Schedule types on EventBridge Scheduler in the EventBridge Scheduler User Guide.

    " }, "ScheduleExpressionTimezone":{ "shape":"ScheduleExpressionTimezone", diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index fa0040387d7..08aa38e4384 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index 635d04fb625..7fad7482eef 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/secretsmanager/src/main/resources/codegen-resources/service-2.json b/services/secretsmanager/src/main/resources/codegen-resources/service-2.json index ef5169e86fb..b74b8b2b4d1 100644 --- a/services/secretsmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/secretsmanager/src/main/resources/codegen-resources/service-2.json @@ -172,6 +172,7 @@ "output":{"shape":"ListSecretsResponse"}, "errors":[ {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, {"shape":"InvalidNextTokenException"}, {"shape":"InternalServiceError"} ], diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index d151782d1a0..049d03dce9b 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json b/services/securityhub/src/main/resources/codegen-resources/service-2.json index b6f2a5044c1..db6338f2b77 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json 
@@ -1504,7 +1504,7 @@ }, "IsTerminal":{ "shape":"Boolean", - "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding.
 The default value of this field is false.

    " + "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.

    " }, "Criteria":{ "shape":"AutomationRulesFindingFilters", @@ -1737,7 +1737,7 @@ }, "IsTerminal":{ "shape":"Boolean", - "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding.
 The default value of this field is false.

    " + "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.

    " }, "CreatedAt":{ "shape":"Timestamp", @@ -2544,6 +2544,62 @@ }, "documentation":"

    Specifies the authorization configuration for using Amazon Cognito user pools with your AppSync GraphQL API endpoint.

    " }, + "AwsAthenaWorkGroupConfigurationDetails":{ + "type":"structure", + "members":{ + "ResultConfiguration":{ + "shape":"AwsAthenaWorkGroupConfigurationResultConfigurationDetails", + "documentation":"

    The location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query and calculation results. These are known as client-side settings. If workgroup settings override client-side settings, then the query uses the workgroup settings.

    " + } + }, + "documentation":"

    The configuration of the workgroup, which includes the location in Amazon Simple Storage Service (Amazon S3) where query results are stored, the encryption option, if any, used for query results, whether Amazon CloudWatch metrics are enabled for the workgroup, and the limit for the amount of bytes scanned (cutoff) per query, if it is specified.

    " + }, + "AwsAthenaWorkGroupConfigurationResultConfigurationDetails":{ + "type":"structure", + "members":{ + "EncryptionConfiguration":{ + "shape":"AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails", + "documentation":"

    Specifies the method used to encrypt the user’s data stores in the Athena workgroup.

    " + } + }, + "documentation":"

    The location in Amazon Simple Storage Service (Amazon S3) where query and calculation results are stored and the encryption option, if any, used for query and calculation results. These are known as client-side settings. If workgroup settings override client-side settings, then the query uses the workgroup settings.

    " + }, + "AwsAthenaWorkGroupConfigurationResultConfigurationEncryptionConfigurationDetails":{ + "type":"structure", + "members":{ + "EncryptionOption":{ + "shape":"NonEmptyString", + "documentation":"

    Indicates whether Amazon Simple Storage Service (Amazon S3) server-side encryption with Amazon S3 managed keys (SSE_S3), server-side encryption with KMS keys (SSE_KMS), or client-side encryption with KMS customer managed keys (CSE_KMS) is used.

    " + }, + "KmsKey":{ + "shape":"NonEmptyString", + "documentation":"

    For SSE_KMS and CSE_KMS, this is the KMS key Amazon Resource Name (ARN) or ID.

    " + } + }, + "documentation":"

    Specifies the method used to encrypt the user’s data stores in the Athena workgroup.

    " + }, + "AwsAthenaWorkGroupDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"

    The workgroup name.

    " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

    The workgroup description.

    " + }, + "State":{ + "shape":"NonEmptyString", + "documentation":"

    Whether the workgroup is enabled or disabled.

    " + }, + "Configuration":{ + "shape":"AwsAthenaWorkGroupConfigurationDetails", + "documentation":"

    The configuration of the workgroup, which includes the location in Amazon Simple Storage Service (Amazon S3) where query results are stored, the encryption option, if any, used for query results, whether Amazon CloudWatch metrics are enabled for the workgroup, and the limit for the amount of bytes scanned (cutoff) per query, if it is specified.

    " + } + }, + "documentation":"

    Provides information about an Amazon Athena workgroup.

    " + }, "AwsAutoScalingAutoScalingGroupAvailabilityZonesList":{ "type":"list", "member":{"shape":"AwsAutoScalingAutoScalingGroupAvailabilityZonesListDetails"} @@ -10406,6 +10462,24 @@ "type":"list", "member":{"shape":"AwsRdsDbClusterOptionGroupMembership"} }, + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute":{ + "type":"structure", + "members":{ + "AttributeName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the manual DB cluster snapshot attribute. The attribute named restore refers to the list of Amazon Web Services accounts that have permission to copy or restore the manual DB cluster snapshot.

    " + }, + "AttributeValues":{ + "shape":"NonEmptyStringList", + "documentation":"

    The value(s) for the manual DB cluster snapshot attribute. If the AttributeName field is set to restore, then this element returns a list of IDs of the Amazon Web Services accounts that are authorized to copy or restore the manual DB cluster snapshot. If a value of all is in the list, then the manual DB cluster snapshot is public and available for any Amazon Web Services account to copy or restore.

    " + } + }, + "documentation":"

    Contains the name and values of a manual Amazon Relational Database Service (RDS) DB cluster snapshot attribute.

    " + }, + "AwsRdsDbClusterSnapshotDbClusterSnapshotAttributes":{ + "type":"list", + "member":{"shape":"AwsRdsDbClusterSnapshotDbClusterSnapshotAttribute"} + }, "AwsRdsDbClusterSnapshotDetails":{ "type":"structure", "members":{ @@ -10480,6 +10554,10 @@ "IamDatabaseAuthenticationEnabled":{ "shape":"Boolean", "documentation":"

    Whether mapping of IAM accounts to database accounts is enabled.

    " + }, + "DbClusterSnapshotAttributes":{ + "shape":"AwsRdsDbClusterSnapshotDbClusterSnapshotAttributes", + "documentation":"

    Contains the name and values of a manual DB cluster snapshot attribute.

    " } }, "documentation":"

    Information about an Amazon RDS DB cluster snapshot.

    " @@ -14712,7 +14790,7 @@ }, "IsTerminal":{ "shape":"Boolean", - "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. The default value of this field is false.

    " + "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.

    " }, "Criteria":{ "shape":"AutomationRulesFindingFilters", @@ -16558,20 +16636,22 @@ }, "Value":{ "shape":"NonEmptyString", - "documentation":"

    The value for the key in the map filter. Filter values are case sensitive. For example, one of the values for a tag called Department might be Security. If you provide security as the filter value, then there is no match.

    " + "documentation":"

    The value for the key in the map filter. Filter values are case sensitive. For example, one of the values for a tag called Department might be Security. If you provide security as the filter value, then there's no match.

    " }, "Comparison":{ "shape":"MapFilterComparison", - "documentation":"

    The condition to apply to the key value when querying for findings with a map filter.

    To search for values that exactly match the filter value, use EQUALS. For example, for the ResourceTags field, the filter Department EQUALS Security matches findings that have the value Security for the tag Department.

    To search for values other than the filter value, use NOT_EQUALS. For example, for the ResourceTags field, the filter Department NOT_EQUALS Finance matches findings that do not have the value Finance for the tag Department.

    EQUALS filters on the same field are joined by OR. A finding matches if it matches any one of those filters.

    NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters.

    You cannot have both an EQUALS filter and a NOT_EQUALS filter on the same field.

    " + "documentation":"

    The condition to apply to the key value when filtering Security Hub findings with a map filter.

    To search for values that have the filter value, use one of the following comparison operators:

    • To search for values that include the filter value, use CONTAINS. For example, for the ResourceTags field, the filter Department CONTAINS Security matches findings that include the value Security for the Department tag. In the same example, a finding with a value of Security team for the Department tag is a match.

    • To search for values that exactly match the filter value, use EQUALS. For example, for the ResourceTags field, the filter Department EQUALS Security matches findings that have the value Security for the Department tag.

    CONTAINS and EQUALS filters on the same field are joined by OR. A finding matches if it matches any one of those filters. For example, the filters Department CONTAINS Security OR Department CONTAINS Finance match a finding that includes either Security, Finance, or both values.

    To search for values that don't have the filter value, use one of the following comparison operators:

    • To search for values that exclude the filter value, use NOT_CONTAINS. For example, for the ResourceTags field, the filter Department NOT_CONTAINS Finance matches findings that exclude the value Finance for the Department tag.

    • To search for values other than the filter value, use NOT_EQUALS. For example, for the ResourceTags field, the filter Department NOT_EQUALS Finance matches findings that don’t have the value Finance for the Department tag.

    NOT_CONTAINS and NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters. For example, the filters Department NOT_CONTAINS Security AND Department NOT_CONTAINS Finance match a finding that excludes both the Security and Finance values.

    CONTAINS filters can only be used with other CONTAINS filters. NOT_CONTAINS filters can only be used with other NOT_CONTAINS filters.

    You can’t have both a CONTAINS filter and a NOT_CONTAINS filter on the same field. Similarly, you can’t have both an EQUALS filter and a NOT_EQUALS filter on the same field. Combining filters in this way returns an error.

    CONTAINS and NOT_CONTAINS operators can be used only with automation rules. For more information, see Automation rules in the Security Hub User Guide.

    " } }, - "documentation":"

    A map filter for querying findings. Each map filter provides the field to check, the value to look for, and the comparison operator.

    " + "documentation":"

    A map filter for filtering Security Hub findings. Each map filter provides the field to check for, the value to check for, and the comparison operator.

    " }, "MapFilterComparison":{ "type":"string", "enum":[ "EQUALS", - "NOT_EQUALS" + "NOT_EQUALS", + "CONTAINS", + "NOT_CONTAINS" ] }, "MapFilterList":{ @@ -17625,6 +17705,10 @@ "AwsStepFunctionStateMachine":{ "shape":"AwsStepFunctionStateMachineDetails", "documentation":"

    Provides details about an Step Functions state machine, which is a workflow consisting of a series of event-driven steps.

    " + }, + "AwsAthenaWorkGroup":{ + "shape":"AwsAthenaWorkGroupDetails", + "documentation":"

    Provides information about an Amazon Athena workgroup. A workgroup helps you separate users, teams, applications, or workloads. It also helps you set limits on data processing and track costs.

    " } }, "documentation":"

    Additional details about a resource related to a finding.

    To provide the details, use the object that corresponds to the resource type. For example, if the resource type is AwsEc2Instance, then you use the AwsEc2Instance object to provide the details.

    If the type-specific object does not contain all of the fields you want to populate, then you use the Other object to populate those additional fields.

    You also use the Other object to populate the details when the selected type does not have a corresponding object.

    " @@ -18778,14 +18862,14 @@ "members":{ "Value":{ "shape":"NonEmptyString", - "documentation":"

    The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is Security Hub. If you provide security hub as the filter text, then there is no match.

    " + "documentation":"

    The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is Security Hub. If you provide security hub as the filter value, there's no match.

    " }, "Comparison":{ "shape":"StringFilterComparison", - "documentation":"

    The condition to apply to a string value when querying for findings. To search for values that contain the filter criteria value, use one of the following comparison operators:

    • To search for values that exactly match the filter value, use EQUALS.

      For example, the filter ResourceType EQUALS AwsEc2SecurityGroup only matches findings that have a resource type of AwsEc2SecurityGroup.

    • To search for values that start with the filter value, use PREFIX.

      For example, the filter ResourceType PREFIX AwsIam matches findings that have a resource type that starts with AwsIam. Findings with a resource type of AwsIamPolicy, AwsIamRole, or AwsIamUser would all match.

    EQUALS and PREFIX filters on the same field are joined by OR. A finding matches if it matches any one of those filters.

    To search for values that do not contain the filter criteria value, use one of the following comparison operators:

    • To search for values that do not exactly match the filter value, use NOT_EQUALS.

      For example, the filter ResourceType NOT_EQUALS AwsIamPolicy matches findings that have a resource type other than AwsIamPolicy.

    • To search for values that do not start with the filter value, use PREFIX_NOT_EQUALS.

      For example, the filter ResourceType PREFIX_NOT_EQUALS AwsIam matches findings that have a resource type that does not start with AwsIam. Findings with a resource type of AwsIamPolicy, AwsIamRole, or AwsIamUser would all be excluded from the results.

    NOT_EQUALS and PREFIX_NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters.

    For filters on the same field, you cannot provide both an EQUALS filter and a NOT_EQUALS or PREFIX_NOT_EQUALS filter. Combining filters in this way always returns an error, even if the provided filter values would return valid results.

    You can combine PREFIX filters with NOT_EQUALS or PREFIX_NOT_EQUALS filters for the same field. Security Hub first processes the PREFIX filters, then the NOT_EQUALS or PREFIX_NOT_EQUALS filters.

    For example, for the following filter, Security Hub first identifies findings that have resource types that start with either AwsIAM or AwsEc2. It then excludes findings that have a resource type of AwsIamPolicy and findings that have a resource type of AwsEc2NetworkInterface.

    • ResourceType PREFIX AwsIam

    • ResourceType PREFIX AwsEc2

    • ResourceType NOT_EQUALS AwsIamPolicy

    • ResourceType NOT_EQUALS AwsEc2NetworkInterface

    " + "documentation":"

    The condition to apply to a string value when filtering Security Hub findings.

    To search for values that have the filter value, use one of the following comparison operators:

    • To search for values that include the filter value, use CONTAINS. For example, the filter Title CONTAINS CloudFront matches findings that have a Title that includes the string CloudFront.

    • To search for values that exactly match the filter value, use EQUALS. For example, the filter AwsAccountId EQUALS 123456789012 only matches findings that have an account ID of 123456789012.

    • To search for values that start with the filter value, use PREFIX. For example, the filter ResourceRegion PREFIX us matches findings that have a ResourceRegion that starts with us. A ResourceRegion that starts with a different value, such as af, ap, or ca, doesn't match.

    CONTAINS, EQUALS, and PREFIX filters on the same field are joined by OR. A finding matches if it matches any one of those filters. For example, the filters Title CONTAINS CloudFront OR Title CONTAINS CloudWatch match a finding that includes either CloudFront, CloudWatch, or both strings in the title.

    To search for values that don’t have the filter value, use one of the following comparison operators:

    • To search for values that exclude the filter value, use NOT_CONTAINS. For example, the filter Title NOT_CONTAINS CloudFront matches findings that have a Title that excludes the string CloudFront.

    • To search for values other than the filter value, use NOT_EQUALS. For example, the filter AwsAccountId NOT_EQUALS 123456789012 only matches findings that have an account ID other than 123456789012.

    • To search for values that don't start with the filter value, use PREFIX_NOT_EQUALS. For example, the filter ResourceRegion PREFIX_NOT_EQUALS us matches findings with a ResourceRegion that starts with a value other than us.

    NOT_CONTAINS, NOT_EQUALS, and PREFIX_NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters. For example, the filters Title NOT_CONTAINS CloudFront AND Title NOT_CONTAINS CloudWatch match a finding that excludes both CloudFront and CloudWatch in the title.

    You can’t have both a CONTAINS filter and a NOT_CONTAINS filter on the same field. Similarly, you can't provide both an EQUALS filter and a NOT_EQUALS or PREFIX_NOT_EQUALS filter on the same field. Combining filters in this way returns an error. CONTAINS filters can only be used with other CONTAINS filters. NOT_CONTAINS filters can only be used with other NOT_CONTAINS filters.

    You can combine PREFIX filters with NOT_EQUALS or PREFIX_NOT_EQUALS filters for the same field. Security Hub first processes the PREFIX filters, and then the NOT_EQUALS or PREFIX_NOT_EQUALS filters.

    For example, for the following filters, Security Hub first identifies findings that have resource types that start with either AwsIam or AwsEc2. It then excludes findings that have a resource type of AwsIamPolicy and findings that have a resource type of AwsEc2NetworkInterface.

    • ResourceType PREFIX AwsIam

    • ResourceType PREFIX AwsEc2

    • ResourceType NOT_EQUALS AwsIamPolicy

    • ResourceType NOT_EQUALS AwsEc2NetworkInterface

    CONTAINS and NOT_CONTAINS operators can be used only with automation rules. For more information, see Automation rules in the Security Hub User Guide.

    " } }, - "documentation":"

    A string filter for querying findings.

    " + "documentation":"

    A string filter for filtering Security Hub findings.

    " }, "StringFilterComparison":{ "type":"string", @@ -18793,7 +18877,9 @@ "EQUALS", "PREFIX", "NOT_EQUALS", - "PREFIX_NOT_EQUALS" + "PREFIX_NOT_EQUALS", + "CONTAINS", + "NOT_CONTAINS" ] }, "StringFilterList":{ @@ -19132,7 +19218,7 @@ }, "IsTerminal":{ "shape":"Boolean", - "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If the value of this field is set to true for a rule, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding.
 The default value of this field is false.

    " + "documentation":"

    Specifies whether a rule is the last to be applied with respect to a finding that matches the rule criteria. This is useful when a finding matches the criteria for multiple rules, and each rule has different actions. If a rule is terminal, Security Hub applies the rule action to a finding that matches the rule criteria and doesn't evaluate other rules for the finding. By default, a rule isn't terminal.

    " }, "Criteria":{ "shape":"AutomationRulesFindingFilters", diff --git a/services/securitylake/pom.xml b/services/securitylake/pom.xml index 47be9f73df3..5653d935b80 100644 --- a/services/securitylake/pom.xml +++ b/services/securitylake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT securitylake AWS Java SDK :: Services :: Security Lake diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index 30326f1d2e9..6c3d2532f72 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index ebe46083227..cfefb45193e 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalog/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/servicecatalog/src/main/resources/codegen-resources/endpoint-rule-set.json index 5c89793f98f..510eb10b1af 100644 --- a/services/servicecatalog/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/servicecatalog/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - 
"properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://servicecatalog-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { 
"conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://servicecatalog-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://servicecatalog-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://servicecatalog-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - 
"conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://servicecatalog.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://servicecatalog.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://servicecatalog.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://servicecatalog.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/servicecatalog/src/main/resources/codegen-resources/service-2.json b/services/servicecatalog/src/main/resources/codegen-resources/service-2.json index c228fbebef6..933371fc974 100644 --- a/services/servicecatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/servicecatalog/src/main/resources/codegen-resources/service-2.json @@ -780,7 +780,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParametersException"} ], - 
"documentation":"

    Requests the import of a resource as an Service Catalog provisioned product that is associated to an Service Catalog product and provisioning artifact. Once imported, all supported governance actions are supported on the provisioned product.

    Resource import only supports CloudFormation stack ARNs. CloudFormation StackSets, and non-root nested stacks are not supported.

    The CloudFormation stack must have one of the following statuses to be imported: CREATE_COMPLETE, UPDATE_COMPLETE, UPDATE_ROLLBACK_COMPLETE, IMPORT_COMPLETE, and IMPORT_ROLLBACK_COMPLETE.

    Import of the resource requires that the CloudFormation stack template matches the associated Service Catalog product provisioning artifact.

    When you import an existing CloudFormation stack into a portfolio, constraints that are associated with the product aren't applied during the import process. The constraints are applied after you call UpdateProvisionedProduct for the provisioned product.

    The user or role that performs this operation must have the cloudformation:GetTemplate and cloudformation:DescribeStacks IAM policy permissions.

    " + "documentation":"

    Requests the import of a resource as an Service Catalog provisioned product that is associated to an Service Catalog product and provisioning artifact. Once imported, all supported governance actions are supported on the provisioned product.

    Resource import only supports CloudFormation stack ARNs. CloudFormation StackSets, and non-root nested stacks, are not supported.

    The CloudFormation stack must have one of the following statuses to be imported: CREATE_COMPLETE, UPDATE_COMPLETE, UPDATE_ROLLBACK_COMPLETE, IMPORT_COMPLETE, and IMPORT_ROLLBACK_COMPLETE.

    Import of the resource requires that the CloudFormation stack template matches the associated Service Catalog product provisioning artifact.

    When you import an existing CloudFormation stack into a portfolio, Service Catalog does not apply the product's associated constraints during the import process. Service Catalog applies the constraints after you call UpdateProvisionedProduct for the provisioned product.

    The user or role that performs this operation must have the cloudformation:GetTemplate and cloudformation:DescribeStacks IAM policy permissions.

    You can only import one provisioned product at a time. The product's CloudFormation stack must have the IMPORT_COMPLETE status before you import another.

    " }, "ListAcceptedPortfolioShares":{ "name":"ListAcceptedPortfolioShares", @@ -1213,7 +1213,7 @@ {"shape":"OperationNotSupportedException"}, {"shape":"InvalidStateException"} ], - "documentation":"

    Updates the specified portfolio share. You can use this API to enable or disable TagOptions sharing or Principal sharing for an existing portfolio share.

    The portfolio share cannot be updated if the CreatePortfolioShare operation is IN_PROGRESS, as the share is not available to recipient entities. In this case, you must wait for the portfolio share to be COMPLETED.

    You must provide the accountId or organization node in the input, but not both.

    If the portfolio is shared to both an external account and an organization node, and both shares need to be updated, you must invoke UpdatePortfolioShare separately for each share type.

    This API cannot be used for removing the portfolio share. You must use DeletePortfolioShare API for that action.

    When you associate a principal with portfolio, a potential privilege escalation path may occur when that portfolio is then shared with other accounts. For a user in a recipient account who is not an Service Catalog Admin, but still has the ability to create Principals (Users/Groups/Roles), that user could create a role that matches a principal name association for the portfolio. Although this user may not know which principal names are associated through Service Catalog, they may be able to guess the user. If this potential escalation path is a concern, then Service Catalog recommends using PrincipalType as IAM. With this configuration, the PrincipalARN must already exist in the recipient account before it can be associated.

    " + "documentation":"

    Updates the specified portfolio share. You can use this API to enable or disable TagOptions sharing or Principal sharing for an existing portfolio share.

    The portfolio share cannot be updated if the CreatePortfolioShare operation is IN_PROGRESS, as the share is not available to recipient entities. In this case, you must wait for the portfolio share to be completed.

    You must provide the accountId or organization node in the input, but not both.

    If the portfolio is shared to both an external account and an organization node, and both shares need to be updated, you must invoke UpdatePortfolioShare separately for each share type.

    This API cannot be used for removing the portfolio share. You must use DeletePortfolioShare API for that action.

    When you associate a principal with portfolio, a potential privilege escalation path may occur when that portfolio is then shared with other accounts. For a user in a recipient account who is not an Service Catalog Admin, but still has the ability to create Principals (Users/Groups/Roles), that user could create a role that matches a principal name association for the portfolio. Although this user may not know which principal names are associated through Service Catalog, they may be able to guess the user. If this potential escalation path is a concern, then Service Catalog recommends using PrincipalType as IAM. With this configuration, the PrincipalARN must already exist in the recipient account before it can be associated.

    " }, "UpdateProduct":{ "name":"UpdateProduct", @@ -1341,7 +1341,7 @@ }, "Value":{ "shape":"AccessLevelFilterValue", - "documentation":"

    The user to which the access level applies. The only supported value is Self.

    " + "documentation":"

    The user to which the access level applies. The only supported value is self.

    " } }, "documentation":"

    The access level to use to filter results.

    " @@ -4579,7 +4579,8 @@ "enum":[ "CLOUD_FORMATION_TEMPLATE", "MARKETPLACE", - "TERRAFORM_OPEN_SOURCE" + "TERRAFORM_OPEN_SOURCE", + "TERRAFORM_CLOUD" ], "max":8191 }, @@ -4863,11 +4864,11 @@ }, "LastProvisioningRecordId":{ "shape":"Id", - "documentation":"

    The record identifier of the last request performed on this provisioned product of the following types:

    • ProvisionedProduct

    • UpdateProvisionedProduct

    • ExecuteProvisionedProductPlan

    • TerminateProvisionedProduct

    " + "documentation":"

    The record identifier of the last request performed on this provisioned product of the following types:

    • ProvisionProduct

    • UpdateProvisionedProduct

    • ExecuteProvisionedProductPlan

    • TerminateProvisionedProduct

    " }, "LastSuccessfulProvisioningRecordId":{ "shape":"Id", - "documentation":"

    The record identifier of the last successful request performed on this provisioned product of the following types:

    • ProvisionedProduct

    • UpdateProvisionedProduct

    • ExecuteProvisionedProductPlan

    • TerminateProvisionedProduct

    " + "documentation":"

    The record identifier of the last successful request performed on this provisioned product of the following types:

    • ProvisionProduct

    • UpdateProvisionedProduct

    • ExecuteProvisionedProductPlan

    • TerminateProvisionedProduct

    " }, "Tags":{ "shape":"Tags", @@ -4949,11 +4950,11 @@ }, "LastProvisioningRecordId":{ "shape":"Id", - "documentation":"

    The record identifier of the last request performed on this provisioned product of the following types:

    • ProvisionedProduct

    • UpdateProvisionedProduct

    • ExecuteProvisionedProductPlan

    • TerminateProvisionedProduct

    " + "documentation":"

    The record identifier of the last request performed on this provisioned product of the following types:

    • ProvisionProduct

    • UpdateProvisionedProduct

    • ExecuteProvisionedProductPlan

    • TerminateProvisionedProduct

    " }, "LastSuccessfulProvisioningRecordId":{ "shape":"Id", - "documentation":"

    The record identifier of the last successful request performed on this provisioned product of the following types:

    • ProvisionedProduct

    • UpdateProvisionedProduct

    • ExecuteProvisionedProductPlan

    • TerminateProvisionedProduct

    " + "documentation":"

    The record identifier of the last successful request performed on this provisioned product of the following types:

    • ProvisionProduct

    • UpdateProvisionedProduct

    • ExecuteProvisionedProductPlan

    • TerminateProvisionedProduct

    " }, "ProductId":{ "shape":"Id", @@ -5185,7 +5186,7 @@ }, "Type":{ "shape":"ProvisioningArtifactType", - "documentation":"

    The type of provisioning artifact.

    • CLOUD_FORMATION_TEMPLATE - CloudFormation template

    • MARKETPLACE_AMI - Amazon Web Services Marketplace AMI

    • MARKETPLACE_CAR - Amazon Web Services Marketplace Clusters and Amazon Web Services Resources

    " + "documentation":"

    The type of provisioning artifact.

    CLOUD_FORMATION_TEMPLATE - CloudFormation template

    " }, "CreatedTime":{ "shape":"CreationTime", @@ -5320,11 +5321,11 @@ }, "Type":{ "shape":"ProvisioningArtifactType", - "documentation":"

    The type of provisioning artifact.

    • CLOUD_FORMATION_TEMPLATE - CloudFormation template

    • MARKETPLACE_AMI - Amazon Web Services Marketplace AMI

    • MARKETPLACE_CAR - Amazon Web Services Marketplace Clusters and Amazon Web Services Resources

    • TERRAFORM_OPEN_SOURCE - Terraform open source configuration file

    " + "documentation":"

    The type of provisioning artifact.

    • CLOUD_FORMATION_TEMPLATE - CloudFormation template

    • TERRAFORM_OPEN_SOURCE - Terraform open source configuration file

    " }, "DisableTemplateValidation":{ "shape":"DisableTemplateValidation", - "documentation":"

    If set to true, Service Catalog stops validating the specified provisioning artifact even if it is invalid.

    " + "documentation":"

    If set to true, Service Catalog stops validating the specified provisioning artifact even if it is invalid.

    Service Catalog does not support template validation for the TERRAFORM_OS product type.

    " } }, "documentation":"

    Information about a provisioning artifact (also known as a version) for a product.

    " @@ -5370,7 +5371,8 @@ "CLOUD_FORMATION_TEMPLATE", "MARKETPLACE_AMI", "MARKETPLACE_CAR", - "TERRAFORM_OPEN_SOURCE" + "TERRAFORM_OPEN_SOURCE", + "TERRAFORM_CLOUD" ] }, "ProvisioningArtifactView":{ @@ -5468,7 +5470,7 @@ }, "ProvisionedProductType":{ "shape":"ProvisionedProductType", - "documentation":"

    The type of provisioned product. The supported values are CFN_STACK and CFN_STACKSET.

    " + "documentation":"

    The type of provisioned product. The supported values are CFN_STACK, CFN_STACKSET, TERRAFORM_OPEN_SOURCE, and TERRAFORM_CLOUD.

    " }, "RecordType":{ "shape":"RecordType", @@ -5947,7 +5949,7 @@ }, "Filters":{ "shape":"ProvisionedProductFilters", - "documentation":"

    The search filters.

    When the key is SearchQuery, the searchable fields are arn, createdTime, id, lastRecordId, idempotencyToken, name, physicalId, productId, provisioningArtifact, type, status, tags, userArn, userArnSession, lastProvisioningRecordId, lastSuccessfulProvisioningRecordId, productName, and provisioningArtifactName.

    Example: \"SearchQuery\":[\"status:AVAILABLE\"]

    " + "documentation":"

    The search filters.

    When the key is SearchQuery, the searchable fields are arn, createdTime, id, lastRecordId, idempotencyToken, name, physicalId, productId, provisioningArtifactId, type, status, tags, userArn, userArnSession, lastProvisioningRecordId, lastSuccessfulProvisioningRecordId, productName, and provisioningArtifactName.

    Example: \"SearchQuery\":[\"status:AVAILABLE\"]

    " }, "SortBy":{ "shape":"SortField", diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index 58c02124c4f..5a4fa7ee125 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 8ff747a3d86..34e9cd51d09 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index 480c984c98b..b2d90fbba0c 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/ses/pom.xml b/services/ses/pom.xml index c6a5c4c02d4..ea8beadd35e 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/ses/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/ses/src/main/resources/codegen-resources/endpoint-rule-set.json index 103753b59b9..1d567c5f974 100644 --- a/services/ses/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/ses/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,23 +45,20 @@ { "conditions": [ { - "fn": "isSet", + "fn": 
"booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" } ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], "type": "tree", "rules": [ { @@ -71,67 +67,42 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", "type": "error" }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "Region" + } ] } ], @@ -140,90 +111,215 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ - true, { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsFIPS" + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": 
"booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://email-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } ] }, { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "supportsDualStack" + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://email-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://email-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - 
{ - "ref": "UseFIPS" }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "supportsFIPS" + true ] } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://email.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], "type": "tree", @@ -231,7 +327,7 @@ { "conditions": [], "endpoint": { - "url": "https://email-fips.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://email.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -240,74 +336,13 @@ ] } ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://email.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled 
but this partition does not support DualStack", - "type": "error" } ] }, { "conditions": [], - "endpoint": { - "url": "https://email.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } diff --git a/services/ses/src/main/resources/codegen-resources/endpoint-tests.json b/services/ses/src/main/resources/codegen-resources/endpoint-tests.json index a2a048c63dc..fa4feb98bdd 100644 --- a/services/ses/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/ses/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,822 +1,198 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": 
"eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region 
eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://email.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": 
false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://email.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - 
"Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://email.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-northeast-2.api.aws" + "url": 
"https://email.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-northeast-2.amazonaws.com" + "url": "https://email.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.ap-northeast-2.api.aws" + "url": "https://email.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, "Region": "ap-northeast-2", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.ap-northeast-2.amazonaws.com" + "url": "https://email.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-northeast-1.api.aws" + "url": "https://email.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": 
"ap-northeast-1", - "UseFIPS": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-northeast-1.amazonaws.com" + "url": "https://email.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.ap-northeast-1.api.aws" + "url": "https://email.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.ap-northeast-1.amazonaws.com" + "url": "https://email.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.me-south-1.api.aws" + "url": "https://email.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true + "Region": "eu-central-1", + "UseFIPS": false, + 
"UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.me-south-1.amazonaws.com" + "url": "https://email.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.me-south-1.api.aws" + "url": "https://email.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.me-south-1.amazonaws.com" + "url": "https://email.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.sa-east-1.api.aws" + "url": "https://email.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.sa-east-1.amazonaws.com" + "url": "https://email.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.sa-east-1.api.aws" + "url": "https://email.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -827,386 +203,365 @@ } }, "params": { - "UseDualStack": false, "Region": "sa-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://email-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.us-gov-west-1.amazonaws.com" + "url": "https://email.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.us-gov-west-1.api.aws" + "url": "https://email-fips.us-east-1.amazonaws.com" } }, "params": { - 
"UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.us-gov-west-1.amazonaws.com" + "url": "https://email.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-southeast-1.api.aws" + "url": "https://email.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-southeast-1.amazonaws.com" + "url": "https://email.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.ap-southeast-1.api.aws" + "url": "https://email-fips.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-west-2", + "UseFIPS": true, + 
"UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://email.ap-southeast-1.amazonaws.com" + "url": "https://email-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-southeast-2.api.aws" + "url": "https://email.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-southeast-2.amazonaws.com" + "url": "https://email-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.ap-southeast-2.api.aws" + "url": "https://email-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack 
disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://email.ap-southeast-2.amazonaws.com" + "url": "https://email.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-southeast-3.api.aws" + "url": "https://email.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.ap-southeast-3.amazonaws.com" + "url": "https://email.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.ap-southeast-3.api.aws" + "url": "https://email-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled 
and DualStack enabled", "expect": { "endpoint": { - "url": "https://email.ap-southeast-3.amazonaws.com" + "url": "https://email-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.us-east-1.api.aws" + "url": "https://email-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://email-fips.us-east-1.amazonaws.com" + "url": "https://email.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.us-east-1.api.aws" + "url": "https://email.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://email.us-east-1.amazonaws.com" - } + "error": "FIPS and DualStack 
are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.us-east-2.api.aws" + "url": "https://email-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://email-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.us-east-2.api.aws" + "url": "https://email.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://email.us-east-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, 
"params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://email-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://email-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://email.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://email.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://email.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, 
"params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1216,9 +571,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1228,11 +583,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/ses/src/main/resources/codegen-resources/service-2.json b/services/ses/src/main/resources/codegen-resources/service-2.json index 5c376aa4938..0e89c30bb7a 100644 --- a/services/ses/src/main/resources/codegen-resources/service-2.json +++ b/services/ses/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"AlreadyExistsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Creates a receipt rule set by cloning an existing one. All receipt rules and configurations are copied to the new receipt rule set and are completely independent of the source rule set.

    For information about setting up rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Creates a receipt rule set by cloning an existing one. All receipt rules and configurations are copied to the new receipt rule set and are completely independent of the source rule set.

    For information about setting up rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "CreateConfigurationSet":{ "name":"CreateConfigurationSet", @@ -47,7 +47,7 @@ {"shape":"InvalidConfigurationSetException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Creates a configuration set.

    Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Creates a configuration set.

    Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "CreateConfigurationSetEventDestination":{ "name":"CreateConfigurationSetEventDestination", @@ -68,7 +68,7 @@ {"shape":"InvalidSNSDestinationException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Creates a configuration set event destination.

    When you create or update an event destination, you must provide one, and only one, destination. The destination can be CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS).

    An event destination is the AWS service to which Amazon SES publishes the email sending events associated with a configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Creates a configuration set event destination.

    When you create or update an event destination, you must provide one, and only one, destination. The destination can be CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS).

    An event destination is the Amazon Web Services service to which Amazon SES publishes the email sending events associated with a configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "CreateConfigurationSetTrackingOptions":{ "name":"CreateConfigurationSetTrackingOptions", @@ -86,7 +86,7 @@ {"shape":"TrackingOptionsAlreadyExistsException"}, {"shape":"InvalidTrackingOptionsException"} ], - "documentation":"

    Creates an association between a configuration set and a custom domain for open and click event tracking.

    By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide.

    " + "documentation":"

    Creates an association between a configuration set and a custom domain for open and click event tracking.

    By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide.

    " }, "CreateCustomVerificationEmailTemplate":{ "name":"CreateCustomVerificationEmailTemplate", @@ -101,7 +101,7 @@ {"shape":"CustomVerificationEmailInvalidContentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Creates a new custom verification email template.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Creates a new custom verification email template.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "CreateReceiptFilter":{ "name":"CreateReceiptFilter", @@ -118,7 +118,7 @@ {"shape":"LimitExceededException"}, {"shape":"AlreadyExistsException"} ], - "documentation":"

    Creates a new IP address filter.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Creates a new IP address filter.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "CreateReceiptRule":{ "name":"CreateReceiptRule", @@ -140,7 +140,7 @@ {"shape":"RuleSetDoesNotExistException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Creates a receipt rule.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Creates a receipt rule.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "CreateReceiptRuleSet":{ "name":"CreateReceiptRuleSet", @@ -157,7 +157,7 @@ {"shape":"AlreadyExistsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Creates an empty receipt rule set.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Creates an empty receipt rule set.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "CreateTemplate":{ "name":"CreateTemplate", @@ -175,7 +175,7 @@ {"shape":"InvalidTemplateException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Creates an email template. Email templates enable you to send personalized email to one or more destinations in a single API operation. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Creates an email template. Email templates enable you to send personalized email to one or more destinations in a single operation. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DeleteConfigurationSet":{ "name":"DeleteConfigurationSet", @@ -191,7 +191,7 @@ "errors":[ {"shape":"ConfigurationSetDoesNotExistException"} ], - "documentation":"

    Deletes a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Deletes a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DeleteConfigurationSetEventDestination":{ "name":"DeleteConfigurationSetEventDestination", @@ -208,7 +208,7 @@ {"shape":"ConfigurationSetDoesNotExistException"}, {"shape":"EventDestinationDoesNotExistException"} ], - "documentation":"

    Deletes a configuration set event destination. Configuration set event destinations are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Deletes a configuration set event destination. Configuration set event destinations are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DeleteConfigurationSetTrackingOptions":{ "name":"DeleteConfigurationSetTrackingOptions", @@ -225,7 +225,7 @@ {"shape":"ConfigurationSetDoesNotExistException"}, {"shape":"TrackingOptionsDoesNotExistException"} ], - "documentation":"

    Deletes an association between a configuration set and a custom domain for open and click event tracking.

    By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide.

    Deleting this kind of association will result in emails sent using the specified configuration set to capture open and click events using the standard, Amazon SES-operated domains.

    " + "documentation":"

    Deletes an association between a configuration set and a custom domain for open and click event tracking.

    By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide.

    Deleting this kind of association results in emails sent using the specified configuration set to capture open and click events using the standard, Amazon SES-operated domains.

    " }, "DeleteCustomVerificationEmailTemplate":{ "name":"DeleteCustomVerificationEmailTemplate", @@ -234,7 +234,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteCustomVerificationEmailTemplateRequest"}, - "documentation":"

    Deletes an existing custom verification email template.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Deletes an existing custom verification email template.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DeleteIdentity":{ "name":"DeleteIdentity", @@ -260,7 +260,7 @@ "shape":"DeleteIdentityPolicyResponse", "resultWrapper":"DeleteIdentityPolicyResult" }, - "documentation":"

    Deletes the specified sending authorization policy for the given identity (an email address or a domain). This API returns successfully even if a policy with the specified name does not exist.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Deletes the specified sending authorization policy for the given identity (an email address or a domain). This operation returns successfully even if a policy with the specified name does not exist.

    This operation is for the identity owner only. If you have not verified the identity, it returns an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DeleteReceiptFilter":{ "name":"DeleteReceiptFilter", @@ -273,7 +273,7 @@ "shape":"DeleteReceiptFilterResponse", "resultWrapper":"DeleteReceiptFilterResult" }, - "documentation":"

    Deletes the specified IP address filter.

    For information about managing IP address filters, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Deletes the specified IP address filter.

    For information about managing IP address filters, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DeleteReceiptRule":{ "name":"DeleteReceiptRule", @@ -289,7 +289,7 @@ "errors":[ {"shape":"RuleSetDoesNotExistException"} ], - "documentation":"

    Deletes the specified receipt rule.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Deletes the specified receipt rule.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DeleteReceiptRuleSet":{ "name":"DeleteReceiptRuleSet", @@ -305,7 +305,7 @@ "errors":[ {"shape":"CannotDeleteException"} ], - "documentation":"

    Deletes the specified receipt rule set and all of the receipt rules it contains.

    The currently active rule set cannot be deleted.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Deletes the specified receipt rule set and all of the receipt rules it contains.

    The currently active rule set cannot be deleted.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DeleteTemplate":{ "name":"DeleteTemplate", @@ -340,7 +340,7 @@ "shape":"DescribeActiveReceiptRuleSetResponse", "resultWrapper":"DescribeActiveReceiptRuleSetResult" }, - "documentation":"

    Returns the metadata and receipt rules for the receipt rule set that is currently active.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns the metadata and receipt rules for the receipt rule set that is currently active.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DescribeConfigurationSet":{ "name":"DescribeConfigurationSet", @@ -356,7 +356,7 @@ "errors":[ {"shape":"ConfigurationSetDoesNotExistException"} ], - "documentation":"

    Returns the details of the specified configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns the details of the specified configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DescribeReceiptRule":{ "name":"DescribeReceiptRule", @@ -373,7 +373,7 @@ {"shape":"RuleDoesNotExistException"}, {"shape":"RuleSetDoesNotExistException"} ], - "documentation":"

    Returns the details of the specified receipt rule.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns the details of the specified receipt rule.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "DescribeReceiptRuleSet":{ "name":"DescribeReceiptRuleSet", @@ -389,7 +389,7 @@ "errors":[ {"shape":"RuleSetDoesNotExistException"} ], - "documentation":"

    Returns the details of the specified receipt rule set.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns the details of the specified receipt rule set.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "GetAccountSendingEnabled":{ "name":"GetAccountSendingEnabled", @@ -401,7 +401,7 @@ "shape":"GetAccountSendingEnabledResponse", "resultWrapper":"GetAccountSendingEnabledResult" }, - "documentation":"

    Returns the email sending status of the Amazon SES account for the current region.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns the email sending status of the Amazon SES account for the current Region.

    You can execute this operation no more than once per second.

    " }, "GetCustomVerificationEmailTemplate":{ "name":"GetCustomVerificationEmailTemplate", @@ -417,7 +417,7 @@ "errors":[ {"shape":"CustomVerificationEmailTemplateDoesNotExistException"} ], - "documentation":"

    Returns the custom email verification template for the template name you specify.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns the custom email verification template for the template name you specify.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "GetIdentityDkimAttributes":{ "name":"GetIdentityDkimAttributes", @@ -430,7 +430,7 @@ "shape":"GetIdentityDkimAttributesResponse", "resultWrapper":"GetIdentityDkimAttributesResult" }, - "documentation":"

    Returns the current status of Easy DKIM signing for an entity. For domain name identities, this operation also returns the DKIM tokens that are required for Easy DKIM signing, and whether Amazon SES has successfully verified that these tokens have been published.

    This operation takes a list of identities as input and returns the following information for each:

    • Whether Easy DKIM signing is enabled or disabled.

    • A set of DKIM tokens that represent the identity. If the identity is an email address, the tokens represent the domain of that address.

    • Whether Amazon SES has successfully verified the DKIM tokens published in the domain's DNS. This information is only returned for domain name identities, not for email addresses.

    This operation is throttled at one request per second and can only get DKIM attributes for up to 100 identities at a time.

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.

    " + "documentation":"

    Returns the current status of Easy DKIM signing for an entity. For domain name identities, this operation also returns the DKIM tokens that are required for Easy DKIM signing, and whether Amazon SES has successfully verified that these tokens have been published.

    This operation takes a list of identities as input and returns the following information for each:

    • Whether Easy DKIM signing is enabled or disabled.

    • A set of DKIM tokens that represent the identity. If the identity is an email address, the tokens represent the domain of that address.

    • Whether Amazon SES has successfully verified the DKIM tokens published in the domain's DNS. This information is only returned for domain name identities, not for email addresses.

    This operation is throttled at one request per second and can only get DKIM attributes for up to 100 identities at a time.

    For more information about creating DNS records using DKIM tokens, go to the Amazon SES Developer Guide.

    " }, "GetIdentityMailFromDomainAttributes":{ "name":"GetIdentityMailFromDomainAttributes", @@ -456,7 +456,7 @@ "shape":"GetIdentityNotificationAttributesResponse", "resultWrapper":"GetIdentityNotificationAttributesResult" }, - "documentation":"

    Given a list of verified identities (email addresses and/or domains), returns a structure describing identity notification attributes.

    This operation is throttled at one request per second and can only get notification attributes for up to 100 identities at a time.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    " + "documentation":"

    Given a list of verified identities (email addresses and/or domains), returns a structure describing identity notification attributes.

    This operation is throttled at one request per second and can only get notification attributes for up to 100 identities at a time.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    " }, "GetIdentityPolicies":{ "name":"GetIdentityPolicies", @@ -469,7 +469,7 @@ "shape":"GetIdentityPoliciesResponse", "resultWrapper":"GetIdentityPoliciesResult" }, - "documentation":"

    Returns the requested sending authorization policies for the given identity (an email address or a domain). The policies are returned as a map of policy names to policy contents. You can retrieve a maximum of 20 policies at a time.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns the requested sending authorization policies for the given identity (an email address or a domain). The policies are returned as a map of policy names to policy contents. You can retrieve a maximum of 20 policies at a time.

    This operation is for the identity owner only. If you have not verified the identity, it returns an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "GetIdentityVerificationAttributes":{ "name":"GetIdentityVerificationAttributes", @@ -482,7 +482,7 @@ "shape":"GetIdentityVerificationAttributesResponse", "resultWrapper":"GetIdentityVerificationAttributesResult" }, - "documentation":"

    Given a list of identities (email addresses and/or domains), returns the verification status and (for domain identities) the verification token for each identity.

    The verification status of an email address is \"Pending\" until the email address owner clicks the link within the verification email that Amazon SES sent to that address. If the email address owner clicks the link within 24 hours, the verification status of the email address changes to \"Success\". If the link is not clicked within 24 hours, the verification status changes to \"Failed.\" In that case, if you still want to verify the email address, you must restart the verification process from the beginning.

    For domain identities, the domain's verification status is \"Pending\" as Amazon SES searches for the required TXT record in the DNS settings of the domain. When Amazon SES detects the record, the domain's verification status changes to \"Success\". If Amazon SES is unable to detect the record within 72 hours, the domain's verification status changes to \"Failed.\" In that case, if you still want to verify the domain, you must restart the verification process from the beginning.

    This operation is throttled at one request per second and can only get verification attributes for up to 100 identities at a time.

    " + "documentation":"

    Given a list of identities (email addresses and/or domains), returns the verification status and (for domain identities) the verification token for each identity.

    The verification status of an email address is \"Pending\" until the email address owner clicks the link within the verification email that Amazon SES sent to that address. If the email address owner clicks the link within 24 hours, the verification status of the email address changes to \"Success\". If the link is not clicked within 24 hours, the verification status changes to \"Failed.\" In that case, to verify the email address, you must restart the verification process from the beginning.

    For domain identities, the domain's verification status is \"Pending\" as Amazon SES searches for the required TXT record in the DNS settings of the domain. When Amazon SES detects the record, the domain's verification status changes to \"Success\". If Amazon SES is unable to detect the record within 72 hours, the domain's verification status changes to \"Failed.\" In that case, to verify the domain, you must restart the verification process from the beginning.

    This operation is throttled at one request per second and can only get verification attributes for up to 100 identities at a time.

    " }, "GetSendQuota":{ "name":"GetSendQuota", @@ -506,7 +506,7 @@ "shape":"GetSendStatisticsResponse", "resultWrapper":"GetSendStatisticsResult" }, - "documentation":"

    Provides sending statistics for the current AWS Region. The result is a list of data points, representing the last two weeks of sending activity. Each data point in the list contains statistics for a 15-minute period of time.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Provides sending statistics for the current Amazon Web Services Region. The result is a list of data points, representing the last two weeks of sending activity. Each data point in the list contains statistics for a 15-minute period of time.

    You can execute this operation no more than once per second.

    " }, "GetTemplate":{ "name":"GetTemplate", @@ -535,7 +535,7 @@ "shape":"ListConfigurationSetsResponse", "resultWrapper":"ListConfigurationSetsResult" }, - "documentation":"

    Provides a list of the configuration sets associated with your Amazon SES account in the current AWS Region. For information about using configuration sets, see Monitoring Your Amazon SES Sending Activity in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second. This operation will return up to 1,000 configuration sets each time it is run. If your Amazon SES account has more than 1,000 configuration sets, this operation will also return a NextToken element. You can then execute the ListConfigurationSets operation again, passing the NextToken parameter and the value of the NextToken element to retrieve additional results.

    " + "documentation":"

    Provides a list of the configuration sets associated with your Amazon SES account in the current Amazon Web Services Region. For information about using configuration sets, see Monitoring Your Amazon SES Sending Activity in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second. This operation returns up to 1,000 configuration sets each time it is run. If your Amazon SES account has more than 1,000 configuration sets, this operation also returns NextToken. You can then execute the ListConfigurationSets operation again, passing the NextToken parameter and the value of the NextToken element to retrieve additional results.

    " }, "ListCustomVerificationEmailTemplates":{ "name":"ListCustomVerificationEmailTemplates", @@ -548,7 +548,7 @@ "shape":"ListCustomVerificationEmailTemplatesResponse", "resultWrapper":"ListCustomVerificationEmailTemplatesResult" }, - "documentation":"

    Lists the existing custom verification email templates for your account in the current AWS Region.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Lists the existing custom verification email templates for your account in the current Amazon Web Services Region.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "ListIdentities":{ "name":"ListIdentities", @@ -561,7 +561,7 @@ "shape":"ListIdentitiesResponse", "resultWrapper":"ListIdentitiesResult" }, - "documentation":"

    Returns a list containing all of the identities (email addresses and domains) for your AWS account in the current AWS Region, regardless of verification status.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns a list containing all of the identities (email addresses and domains) for your Amazon Web Services account in the current Amazon Web Services Region, regardless of verification status.

    You can execute this operation no more than once per second.

    It's recommended that for successive pagination calls of this API, you continue to use the same parameter/value pairs as used in the original call, e.g., if you used IdentityType=Domain in the original call and received a NextToken in the response, you should continue providing the IdentityType=Domain parameter for further NextToken calls; however, if you didn't provide the IdentityType parameter in the original call, then continue to not provide it for successive pagination calls. Using this approach will ensure consistent results.

    " }, "ListIdentityPolicies":{ "name":"ListIdentityPolicies", @@ -574,7 +574,7 @@ "shape":"ListIdentityPoliciesResponse", "resultWrapper":"ListIdentityPoliciesResult" }, - "documentation":"

    Returns a list of sending authorization policies that are attached to the given identity (an email address or a domain). This API returns only a list. If you want the actual policy content, you can use GetIdentityPolicies.

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns a list of sending authorization policies that are attached to the given identity (an email address or a domain). This operation returns only a list. To get the actual policy content, use GetIdentityPolicies.

    This operation is for the identity owner only. If you have not verified the identity, it returns an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "ListReceiptFilters":{ "name":"ListReceiptFilters", @@ -587,7 +587,7 @@ "shape":"ListReceiptFiltersResponse", "resultWrapper":"ListReceiptFiltersResult" }, - "documentation":"

    Lists the IP address filters associated with your AWS account in the current AWS Region.

    For information about managing IP address filters, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Lists the IP address filters associated with your Amazon Web Services account in the current Amazon Web Services Region.

    For information about managing IP address filters, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "ListReceiptRuleSets":{ "name":"ListReceiptRuleSets", @@ -600,7 +600,7 @@ "shape":"ListReceiptRuleSetsResponse", "resultWrapper":"ListReceiptRuleSetsResult" }, - "documentation":"

    Lists the receipt rule sets that exist under your AWS account in the current AWS Region. If there are additional receipt rule sets to be retrieved, you will receive a NextToken that you can provide to the next call to ListReceiptRuleSets to retrieve the additional entries.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Lists the receipt rule sets that exist under your Amazon Web Services account in the current Amazon Web Services Region. If there are additional receipt rule sets to be retrieved, you receive a NextToken that you can provide to the next call to ListReceiptRuleSets to retrieve the additional entries.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "ListTemplates":{ "name":"ListTemplates", @@ -613,7 +613,7 @@ "shape":"ListTemplatesResponse", "resultWrapper":"ListTemplatesResult" }, - "documentation":"

    Lists the email templates present in your Amazon SES account in the current AWS Region.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Lists the email templates present in your Amazon SES account in the current Amazon Web Services Region.

    You can execute this operation no more than once per second.

    " }, "ListVerifiedEmailAddresses":{ "name":"ListVerifiedEmailAddresses", @@ -658,7 +658,7 @@ "errors":[ {"shape":"InvalidPolicyException"} ], - "documentation":"

    Adds or updates a sending authorization policy for the specified identity (an email address or a domain).

    This API is for the identity owner only. If you have not verified the identity, this API will return an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Adds or updates a sending authorization policy for the specified identity (an email address or a domain).

    This operation is for the identity owner only. If you have not verified the identity, it returns an error.

    Sending authorization is a feature that enables an identity owner to authorize other senders to use its identities. For information about using sending authorization, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "ReorderReceiptRuleSet":{ "name":"ReorderReceiptRuleSet", @@ -675,7 +675,7 @@ {"shape":"RuleSetDoesNotExistException"}, {"shape":"RuleDoesNotExistException"} ], - "documentation":"

    Reorders the receipt rules within a receipt rule set.

    All of the rules in the rule set must be represented in this request. That is, this API will return an error if the reorder request doesn't explicitly position all of the rules.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Reorders the receipt rules within a receipt rule set.

    All of the rules in the rule set must be represented in this request. That is, it is an error if the reorder request doesn't explicitly position all of the rules.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "SendBounce":{ "name":"SendBounce", @@ -691,7 +691,7 @@ "errors":[ {"shape":"MessageRejected"} ], - "documentation":"

    Generates and sends a bounce message to the sender of an email you received through Amazon SES. You can only use this API on an email up to 24 hours after you receive it.

    You cannot use this API to send generic bounces for mail that was not received by Amazon SES.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Generates and sends a bounce message to the sender of an email you received through Amazon SES. You can only use this operation on an email up to 24 hours after you receive it.

    You cannot use this operation to send generic bounces for mail that was not received by Amazon SES.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "SendBulkTemplatedEmail":{ "name":"SendBulkTemplatedEmail", @@ -712,7 +712,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

    Composes an email message to multiple destinations. The message body is created using an email template.

    In order to send email using the SendBulkTemplatedEmail operation, your call to the API must meet the following requirements:

    • The call must refer to an existing email template. You can create email templates using the CreateTemplate operation.

    • The message must be sent from a verified email address or domain.

    • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

    • The maximum message size is 10 MB.

    • Each Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

    • The message may not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call the SendBulkTemplatedEmail operation several times to send the message to each group.

    • The number of destinations you can contact in a single call to the API may be limited by your account's maximum sending rate.

    " + "documentation":"

    Composes an email message to multiple destinations. The message body is created using an email template.

    To send email using this operation, your call must meet the following requirements:

    • The call must refer to an existing email template. You can create email templates using CreateTemplate.

    • The message must be sent from a verified email address or domain.

    • If your account is still in the Amazon SES sandbox, you may send only to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

    • The maximum message size is 10 MB.

    • Each Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message is rejected, even if the message contains other recipients that are valid.

    • The message may not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call the SendBulkTemplatedEmail operation several times to send the message to each group.

    • The number of destinations you can contact in a single call can be limited by your account's maximum sending rate.

    " }, "SendCustomVerificationEmail":{ "name":"SendCustomVerificationEmail", @@ -732,7 +732,7 @@ {"shape":"FromEmailAddressNotVerifiedException"}, {"shape":"ProductionAccessNotGrantedException"} ], - "documentation":"

    Adds an email address to the list of identities for your Amazon SES account in the current AWS Region and attempts to verify it. As a result of executing this operation, a customized verification email is sent to the specified address.

    To use this operation, you must first create a custom verification email template. For more information about creating and using custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Adds an email address to the list of identities for your Amazon SES account in the current Amazon Web Services Region and attempts to verify it. As a result of executing this operation, a customized verification email is sent to the specified address.

    To use this operation, you must first create a custom verification email template. For more information about creating and using custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "SendEmail":{ "name":"SendEmail", @@ -752,7 +752,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

    Composes an email message and immediately queues it for sending. In order to send email using the SendEmail operation, your message must meet the following requirements:

    • The message must be sent from a verified email address or domain. If you attempt to send email using a non-verified address or domain, the operation will result in an \"Email address not verified\" error.

    • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

    • The maximum message size is 10 MB.

    • The message must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

    • The message may not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call the SendEmail operation several times to send the message to each group.

    For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

    " + "documentation":"

    Composes an email message and immediately queues it for sending. To send email using this operation, your message must meet the following requirements:

    • The message must be sent from a verified email address or domain. If you attempt to send email using a non-verified address or domain, the operation results in an \"Email address not verified\" error.

    • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

    • The maximum message size is 10 MB.

    • The message must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message is rejected, even if the message contains other recipients that are valid.

    • The message may not include more than 50 recipients, across the To:, CC: and BCC: fields. If you need to send an email message to a larger audience, you can divide your recipient list into groups of 50 or fewer, and then call the SendEmail operation several times to send the message to each group.

    For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

    " }, "SendRawEmail":{ "name":"SendRawEmail", @@ -772,7 +772,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

    Composes an email message and immediately queues it for sending.

    This operation is more flexible than the SendEmail API operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you want to send a multipart MIME email (such a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.

    The SendRawEmail operation has the following requirements:

    • You can only send email from verified email addresses or domains. If you try to send email from an address that isn't verified, the operation results in an \"Email address not verified\" error.

    • If your account is still in the Amazon SES sandbox, you can only send email to other verified addresses in your account, or to addresses that are associated with the Amazon SES mailbox simulator.

    • The maximum message size, including attachments, is 10 MB.

    • Each message has to include at least one recipient address. A recipient address includes any address on the To:, CC:, or BCC: lines.

    • If you send a single message to more than one recipient address, and one of the recipient addresses isn't in a valid format (that is, it's not in the format UserName@[SubDomain.]Domain.TopLevelDomain), Amazon SES rejects the entire message, even if the other addresses are valid.

    • Each message can include up to 50 recipient addresses across the To:, CC:, or BCC: lines. If you need to send a single message to more than 50 recipients, you have to split the list of recipient addresses into groups of less than 50 recipients, and send separate messages to each group.

    • Amazon SES allows you to specify 8-bit Content-Transfer-Encoding for MIME message parts. However, if Amazon SES has to modify the contents of your message (for example, if you use open and click tracking), 8-bit content isn't preserved. For this reason, we highly recommend that you encode all content that isn't 7-bit ASCII. For more information, see MIME Encoding in the Amazon SES Developer Guide.

    Additionally, keep the following considerations in mind when using the SendRawEmail operation:

    • Although you can customize the message headers when using the SendRawEmail operation, Amazon SES will automatically apply its own Message-ID and Date headers; if you passed these headers when creating the message, they will be overwritten by the values that Amazon SES provides.

    • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's Source, From, and Return-Path parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the API, or you can include the following X-headers in the header of your raw email:

      • X-SES-SOURCE-ARN

      • X-SES-FROM-ARN

      • X-SES-RETURN-PATH-ARN

      Don't include these X-headers in the DKIM signature. Amazon SES removes these before it sends the email.

      If you only specify the SourceIdentityArn parameter, Amazon SES sets the From and Return-Path addresses to the same identity that you specified.

      For more information about sending authorization, see the Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide.

    • For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

    " + "documentation":"

    Composes an email message and immediately queues it for sending.

    This operation is more flexible than the SendEmail operation. When you use the SendRawEmail operation, you can specify the headers of the message as well as its content. This flexibility is useful, for example, when you need to send a multipart MIME email (such as a message that contains both a text and an HTML version). You can also use this operation to send messages that include attachments.

    The SendRawEmail operation has the following requirements:

    • You can only send email from verified email addresses or domains. If you try to send email from an address that isn't verified, the operation results in an \"Email address not verified\" error.

    • If your account is still in the Amazon SES sandbox, you can only send email to other verified addresses in your account, or to addresses that are associated with the Amazon SES mailbox simulator.

    • The maximum message size, including attachments, is 10 MB.

    • Each message has to include at least one recipient address. A recipient address includes any address on the To:, CC:, or BCC: lines.

    • If you send a single message to more than one recipient address, and one of the recipient addresses isn't in a valid format (that is, it's not in the format UserName@[SubDomain.]Domain.TopLevelDomain), Amazon SES rejects the entire message, even if the other addresses are valid.

    • Each message can include up to 50 recipient addresses across the To:, CC:, or BCC: lines. If you need to send a single message to more than 50 recipients, you have to split the list of recipient addresses into groups of less than 50 recipients, and send separate messages to each group.

    • Amazon SES allows you to specify 8-bit Content-Transfer-Encoding for MIME message parts. However, if Amazon SES has to modify the contents of your message (for example, if you use open and click tracking), 8-bit content isn't preserved. For this reason, we highly recommend that you encode all content that isn't 7-bit ASCII. For more information, see MIME Encoding in the Amazon SES Developer Guide.

    Additionally, keep the following considerations in mind when using the SendRawEmail operation:

    • Although you can customize the message headers when using the SendRawEmail operation, Amazon SES automatically applies its own Message-ID and Date headers; if you passed these headers when creating the message, they are overwritten by the values that Amazon SES provides.

    • If you are using sending authorization to send on behalf of another user, SendRawEmail enables you to specify the cross-account identity for the email's Source, From, and Return-Path parameters in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn, or you can include the following X-headers in the header of your raw email:

      • X-SES-SOURCE-ARN

      • X-SES-FROM-ARN

      • X-SES-RETURN-PATH-ARN

      Don't include these X-headers in the DKIM signature. Amazon SES removes these before it sends the email.

      If you only specify the SourceIdentityArn parameter, Amazon SES sets the From and Return-Path addresses to the same identity that you specified.

      For more information about sending authorization, see the Using Sending Authorization with Amazon SES in the Amazon SES Developer Guide.

    • For every message that you send, the total number of recipients (including each recipient in the To:, CC: and BCC: fields) is counted against the maximum number of emails you can send in a 24-hour period (your sending quota). For more information about sending quotas in Amazon SES, see Managing Your Amazon SES Sending Limits in the Amazon SES Developer Guide.

    " }, "SendTemplatedEmail":{ "name":"SendTemplatedEmail", @@ -793,7 +793,7 @@ {"shape":"ConfigurationSetSendingPausedException"}, {"shape":"AccountSendingPausedException"} ], - "documentation":"

    Composes an email message using an email template and immediately queues it for sending.

    In order to send email using the SendTemplatedEmail operation, your call to the API must meet the following requirements:

    • The call must refer to an existing email template. You can create email templates using the CreateTemplate operation.

    • The message must be sent from a verified email address or domain.

    • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

    • The maximum message size is 10 MB.

    • Calls to the SendTemplatedEmail operation may only include one Destination parameter. A destination is a set of recipients who will receive the same version of the email. The Destination parameter can include up to 50 recipients, across the To:, CC: and BCC: fields.

    • The Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be rejected, even if the message contains other recipients that are valid.

    If your call to the SendTemplatedEmail operation includes all of the required parameters, Amazon SES accepts it and returns a Message ID. However, if Amazon SES can't render the email because the template contains errors, it doesn't send the email. Additionally, because it already accepted the message, Amazon SES doesn't return a message stating that it was unable to send the email.

    For these reasons, we highly recommend that you set up Amazon SES to send you notifications when Rendering Failure events occur. For more information, see Sending Personalized Email Using the Amazon SES API in the Amazon Simple Email Service Developer Guide.

    " + "documentation":"

    Composes an email message using an email template and immediately queues it for sending.

    To send email using this operation, your call must meet the following requirements:

    • The call must refer to an existing email template. You can create email templates using the CreateTemplate operation.

    • The message must be sent from a verified email address or domain.

    • If your account is still in the Amazon SES sandbox, you may only send to verified addresses or domains, or to email addresses associated with the Amazon SES Mailbox Simulator. For more information, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

    • The maximum message size is 10 MB.

    • Calls to the SendTemplatedEmail operation may only include one Destination parameter. A destination is a set of recipients that receives the same version of the email. The Destination parameter can include up to 50 recipients, across the To:, CC: and BCC: fields.

    • The Destination parameter must include at least one recipient email address. The recipient address can be a To: address, a CC: address, or a BCC: address. If a recipient email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire message is rejected, even if the message contains other recipients that are valid.

    If your call to the SendTemplatedEmail operation includes all of the required parameters, Amazon SES accepts it and returns a Message ID. However, if Amazon SES can't render the email because the template contains errors, it doesn't send the email. Additionally, because it already accepted the message, Amazon SES doesn't return a message stating that it was unable to send the email.

    For these reasons, we highly recommend that you set up Amazon SES to send you notifications when Rendering Failure events occur. For more information, see Sending Personalized Email Using the Amazon SES API in the Amazon Simple Email Service Developer Guide.

    " }, "SetActiveReceiptRuleSet":{ "name":"SetActiveReceiptRuleSet", @@ -809,7 +809,7 @@ "errors":[ {"shape":"RuleSetDoesNotExistException"} ], - "documentation":"

    Sets the specified receipt rule set as the active receipt rule set.

    To disable your email-receiving through Amazon SES completely, you can call this API with RuleSetName set to null.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Sets the specified receipt rule set as the active receipt rule set.

    To disable your email-receiving through Amazon SES completely, you can call this operation with RuleSetName set to null.

    For information about managing receipt rule sets, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "SetIdentityDkimEnabled":{ "name":"SetIdentityDkimEnabled", @@ -822,7 +822,7 @@ "shape":"SetIdentityDkimEnabledResponse", "resultWrapper":"SetIdentityDkimEnabledResult" }, - "documentation":"

    Enables or disables Easy DKIM signing of email sent from an identity. If Easy DKIM signing is enabled for a domain, then Amazon SES uses DKIM to sign all email that it sends from addresses on that domain. If Easy DKIM signing is enabled for an email address, then Amazon SES uses DKIM to sign all email it sends from that address.

    For email addresses (for example, user@example.com), you can only enable DKIM signing if the corresponding domain (in this case, example.com) has been set up to use Easy DKIM.

    You can enable DKIM signing for an identity at any time after you start the verification process for the identity, even if the verification process isn't complete.

    You can execute this operation no more than once per second.

    For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.

    " + "documentation":"

    Enables or disables Easy DKIM signing of email sent from an identity. If Easy DKIM signing is enabled for a domain, then Amazon SES uses DKIM to sign all email that it sends from addresses on that domain. If Easy DKIM signing is enabled for an email address, then Amazon SES uses DKIM to sign all email it sends from that address.

    For email addresses (for example, user@example.com), you can only enable DKIM signing if the corresponding domain (in this case, example.com) has been set up to use Easy DKIM.

    You can enable DKIM signing for an identity at any time after you start the verification process for the identity, even if the verification process isn't complete.

    You can execute this operation no more than once per second.

    For more information about Easy DKIM signing, go to the Amazon SES Developer Guide.

    " }, "SetIdentityFeedbackForwardingEnabled":{ "name":"SetIdentityFeedbackForwardingEnabled", @@ -835,7 +835,7 @@ "shape":"SetIdentityFeedbackForwardingEnabledResponse", "resultWrapper":"SetIdentityFeedbackForwardingEnabledResult" }, - "documentation":"

    Given an identity (an email address or a domain), enables or disables whether Amazon SES forwards bounce and complaint notifications as email. Feedback forwarding can only be disabled when Amazon Simple Notification Service (Amazon SNS) topics are specified for both bounces and complaints.

    Feedback forwarding does not apply to delivery notifications. Delivery notifications are only available through Amazon SNS.

    You can execute this operation no more than once per second.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    " + "documentation":"

    Given an identity (an email address or a domain), enables or disables whether Amazon SES forwards bounce and complaint notifications as email. Feedback forwarding can only be disabled when Amazon Simple Notification Service (Amazon SNS) topics are specified for both bounces and complaints.

    Feedback forwarding does not apply to delivery notifications. Delivery notifications are only available through Amazon SNS.

    You can execute this operation no more than once per second.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    " }, "SetIdentityHeadersInNotificationsEnabled":{ "name":"SetIdentityHeadersInNotificationsEnabled", @@ -848,7 +848,7 @@ "shape":"SetIdentityHeadersInNotificationsEnabledResponse", "resultWrapper":"SetIdentityHeadersInNotificationsEnabledResult" }, - "documentation":"

    Given an identity (an email address or a domain), sets whether Amazon SES includes the original email headers in the Amazon Simple Notification Service (Amazon SNS) notifications of a specified type.

    You can execute this operation no more than once per second.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    " + "documentation":"

    Given an identity (an email address or a domain), sets whether Amazon SES includes the original email headers in the Amazon Simple Notification Service (Amazon SNS) notifications of a specified type.

    You can execute this operation no more than once per second.

    For more information about using notifications with Amazon SES, see the Amazon SES Developer Guide.

    " }, "SetIdentityMailFromDomain":{ "name":"SetIdentityMailFromDomain", @@ -861,7 +861,7 @@ "shape":"SetIdentityMailFromDomainResponse", "resultWrapper":"SetIdentityMailFromDomainResult" }, - "documentation":"

    Enables or disables the custom MAIL FROM domain setup for a verified identity (an email address or a domain).

    To send emails using the specified MAIL FROM domain, you must add an MX record to your MAIL FROM domain's DNS settings. If you want your emails to pass Sender Policy Framework (SPF) checks, you must also add or update an SPF record. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Enables or disables the custom MAIL FROM domain setup for a verified identity (an email address or a domain).

    To send emails using the specified MAIL FROM domain, you must add an MX record to your MAIL FROM domain's DNS settings. To ensure that your emails pass Sender Policy Framework (SPF) checks, you must also add or update an SPF record. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "SetIdentityNotificationTopic":{ "name":"SetIdentityNotificationTopic", @@ -874,7 +874,7 @@ "shape":"SetIdentityNotificationTopicResponse", "resultWrapper":"SetIdentityNotificationTopicResult" }, - "documentation":"

    Sets an Amazon Simple Notification Service (Amazon SNS) topic to use when delivering notifications. When you use this operation, you specify a verified identity, such as an email address or domain. When you send an email that uses the chosen identity in the Source field, Amazon SES sends notifications to the topic you specified. You can send bounce, complaint, or delivery notifications (or any combination of the three) to the Amazon SNS topic that you specify.

    You can execute this operation no more than once per second.

    For more information about feedback notification, see the Amazon SES Developer Guide.

    " + "documentation":"

    Sets an Amazon Simple Notification Service (Amazon SNS) topic to use when delivering notifications. When you use this operation, you specify a verified identity, such as an email address or domain. When you send an email that uses the chosen identity in the Source field, Amazon SES sends notifications to the topic you specified. You can send bounce, complaint, or delivery notifications (or any combination of the three) to the Amazon SNS topic that you specify.

    You can execute this operation no more than once per second.

    For more information about feedback notification, see the Amazon SES Developer Guide.

    " }, "SetReceiptRulePosition":{ "name":"SetReceiptRulePosition", @@ -891,7 +891,7 @@ {"shape":"RuleSetDoesNotExistException"}, {"shape":"RuleDoesNotExistException"} ], - "documentation":"

    Sets the position of the specified receipt rule in the receipt rule set.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Sets the position of the specified receipt rule in the receipt rule set.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "TestRenderTemplate":{ "name":"TestRenderTemplate", @@ -918,7 +918,7 @@ "requestUri":"/" }, "input":{"shape":"UpdateAccountSendingEnabledRequest"}, - "documentation":"

    Enables or disables email sending across your entire Amazon SES account in the current AWS Region. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending across your Amazon SES account in a given AWS Region when reputation metrics (such as your bounce or complaint rates) reach certain thresholds.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Enables or disables email sending across your entire Amazon SES account in the current Amazon Web Services Region. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending across your Amazon SES account in a given Amazon Web Services Region when reputation metrics (such as your bounce or complaint rates) reach certain thresholds.

    You can execute this operation no more than once per second.

    " }, "UpdateConfigurationSetEventDestination":{ "name":"UpdateConfigurationSetEventDestination", @@ -938,7 +938,7 @@ {"shape":"InvalidFirehoseDestinationException"}, {"shape":"InvalidSNSDestinationException"} ], - "documentation":"

    Updates the event destination of a configuration set. Event destinations are associated with configuration sets, which enable you to publish email sending events to Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS). For information about using configuration sets, see Monitoring Your Amazon SES Sending Activity in the Amazon SES Developer Guide.

    When you create or update an event destination, you must provide one, and only one, destination. The destination can be Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS).

    You can execute this operation no more than once per second.

    " + "documentation":"

    Updates the event destination of a configuration set. Event destinations are associated with configuration sets, which enable you to publish email sending events to Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS). For information about using configuration sets, see Monitoring Your Amazon SES Sending Activity in the Amazon SES Developer Guide.

    When you create or update an event destination, you must provide one, and only one, destination. The destination can be Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS).

    You can execute this operation no more than once per second.

    " }, "UpdateConfigurationSetReputationMetricsEnabled":{ "name":"UpdateConfigurationSetReputationMetricsEnabled", @@ -950,7 +950,7 @@ "errors":[ {"shape":"ConfigurationSetDoesNotExistException"} ], - "documentation":"

    Enables or disables the publishing of reputation metrics for emails sent using a specific configuration set in a given AWS Region. Reputation metrics include bounce and complaint rates. These metrics are published to Amazon CloudWatch. By using CloudWatch, you can create alarms when bounce or complaint rates exceed certain thresholds.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Enables or disables the publishing of reputation metrics for emails sent using a specific configuration set in a given Amazon Web Services Region. Reputation metrics include bounce and complaint rates. These metrics are published to Amazon CloudWatch. By using CloudWatch, you can create alarms when bounce or complaint rates exceed certain thresholds.

    You can execute this operation no more than once per second.

    " }, "UpdateConfigurationSetSendingEnabled":{ "name":"UpdateConfigurationSetSendingEnabled", @@ -962,7 +962,7 @@ "errors":[ {"shape":"ConfigurationSetDoesNotExistException"} ], - "documentation":"

    Enables or disables email sending for messages sent using a specific configuration set in a given AWS Region. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending for a configuration set when the reputation metrics for that configuration set (such as your bounce on complaint rate) exceed certain thresholds.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Enables or disables email sending for messages sent using a specific configuration set in a given Amazon Web Services Region. You can use this operation in conjunction with Amazon CloudWatch alarms to temporarily pause email sending for a configuration set when the reputation metrics for that configuration set (such as your bounce or complaint rate) exceed certain thresholds.

    You can execute this operation no more than once per second.

    " }, "UpdateConfigurationSetTrackingOptions":{ "name":"UpdateConfigurationSetTrackingOptions", @@ -980,7 +980,7 @@ {"shape":"TrackingOptionsDoesNotExistException"}, {"shape":"InvalidTrackingOptionsException"} ], - "documentation":"

    Modifies an association between a configuration set and a custom domain for open and click event tracking.

    By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide.

    " + "documentation":"

    Modifies an association between a configuration set and a custom domain for open and click event tracking.

    By default, images and links used for tracking open and click events are hosted on domains operated by Amazon SES. You can configure a subdomain of your own to handle these events. For information about using custom domains, see the Amazon SES Developer Guide.

    " }, "UpdateCustomVerificationEmailTemplate":{ "name":"UpdateCustomVerificationEmailTemplate", @@ -994,7 +994,7 @@ {"shape":"FromEmailAddressNotVerifiedException"}, {"shape":"CustomVerificationEmailInvalidContentException"} ], - "documentation":"

    Updates an existing custom verification email template.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Updates an existing custom verification email template.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "UpdateReceiptRule":{ "name":"UpdateReceiptRule", @@ -1015,7 +1015,7 @@ {"shape":"RuleDoesNotExistException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Updates a receipt rule.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Updates a receipt rule.

    For information about managing receipt rules, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "UpdateTemplate":{ "name":"UpdateTemplate", @@ -1032,7 +1032,7 @@ {"shape":"TemplateDoesNotExistException"}, {"shape":"InvalidTemplateException"} ], - "documentation":"

    Updates an email template. Email templates enable you to send personalized email to one or more destinations in a single API operation. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Updates an email template. Email templates enable you to send personalized email to one or more destinations in a single operation. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "VerifyDomainDkim":{ "name":"VerifyDomainDkim", @@ -1045,7 +1045,7 @@ "shape":"VerifyDomainDkimResponse", "resultWrapper":"VerifyDomainDkimResult" }, - "documentation":"

    Returns a set of DKIM tokens for a domain identity.

    When you execute the VerifyDomainDkim operation, the domain that you specify is added to the list of identities that are associated with your account. This is true even if you haven't already associated the domain with your account by using the VerifyDomainIdentity operation. However, you can't send email from the domain until you either successfully verify it or you successfully set up DKIM for it.

    You use the tokens that are generated by this operation to create CNAME records. When Amazon SES detects that you've added these records to the DNS configuration for a domain, you can start sending email from that domain. You can start sending email even if you haven't added the TXT record provided by the VerifyDomainIdentity operation to the DNS configuration for your domain. All email that you send from the domain is authenticated using DKIM.

    To create the CNAME records for DKIM authentication, use the following values:

    • Name: token._domainkey.example.com

    • Type: CNAME

    • Value: token.dkim.amazonses.com

    In the preceding example, replace token with one of the tokens that are generated when you execute this operation. Replace example.com with your domain. Repeat this process for each token that's generated by this operation.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Returns a set of DKIM tokens for a domain identity.

    When you execute the VerifyDomainDkim operation, the domain that you specify is added to the list of identities that are associated with your account. This is true even if you haven't already associated the domain with your account by using the VerifyDomainIdentity operation. However, you can't send email from the domain until you either successfully verify it or you successfully set up DKIM for it.

    You use the tokens that are generated by this operation to create CNAME records. When Amazon SES detects that you've added these records to the DNS configuration for a domain, you can start sending email from that domain. You can start sending email even if you haven't added the TXT record provided by the VerifyDomainIdentity operation to the DNS configuration for your domain. All email that you send from the domain is authenticated using DKIM.

    To create the CNAME records for DKIM authentication, use the following values:

    • Name: token._domainkey.example.com

    • Type: CNAME

    • Value: token.dkim.amazonses.com

    In the preceding example, replace token with one of the tokens that are generated when you execute this operation. Replace example.com with your domain. Repeat this process for each token that's generated by this operation.

    You can execute this operation no more than once per second.

    " }, "VerifyDomainIdentity":{ "name":"VerifyDomainIdentity", @@ -1058,7 +1058,7 @@ "shape":"VerifyDomainIdentityResponse", "resultWrapper":"VerifyDomainIdentityResult" }, - "documentation":"

    Adds a domain to the list of identities for your Amazon SES account in the current AWS Region and attempts to verify it. For more information about verifying domains, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Adds a domain to the list of identities for your Amazon SES account in the current Amazon Web Services Region and attempts to verify it. For more information about verifying domains, see Verifying Email Addresses and Domains in the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, "VerifyEmailAddress":{ "name":"VerifyEmailAddress", @@ -1080,7 +1080,7 @@ "shape":"VerifyEmailIdentityResponse", "resultWrapper":"VerifyEmailIdentityResult" }, - "documentation":"

    Adds an email address to the list of identities for your Amazon SES account in the current AWS region and attempts to verify it. As a result of executing this operation, a verification email is sent to the specified address.

    You can execute this operation no more than once per second.

    " + "documentation":"

    Adds an email address to the list of identities for your Amazon SES account in the current Amazon Web Services Region and attempts to verify it. As a result of executing this operation, a verification email is sent to the specified address.

    You can execute this operation no more than once per second.

    " } }, "shapes":{ @@ -1105,14 +1105,14 @@ "members":{ "HeaderName":{ "shape":"HeaderName", - "documentation":"

    The name of the header to add. Must be between 1 and 50 characters, inclusive, and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only.

    " + "documentation":"

    The name of the header to add to the incoming message. The name must contain at least one character, and can contain up to 50 characters. It consists of alphanumeric (a–z, A–Z, 0–9) characters and dashes.

    " }, "HeaderValue":{ "shape":"HeaderValue", - "documentation":"

    Must be less than 2048 characters, and must not contain newline characters (\"\\r\" or \"\\n\").

    " + "documentation":"

    The content to include in the header. This value can contain up to 2048 characters. It can't contain newline (\\n) or carriage return (\\r) characters.

    " } }, - "documentation":"

    When included in a receipt rule, this action adds a header to the received email.

    For information about adding a header using a receipt rule, see the Amazon SES Developer Guide.

    " + "documentation":"

    When included in a receipt rule, this action adds a header to the received email.

    For information about adding a header using a receipt rule, see the Amazon SES Developer Guide.

    " }, "Address":{"type":"string"}, "AddressList":{ @@ -1168,7 +1168,7 @@ "members":{ "TopicArn":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the bounce action is taken. You can find the ARN of a topic by using the ListTopics operation in Amazon SNS.

    For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " }, "SmtpReplyCode":{ "shape":"BounceSmtpReplyCode", @@ -1184,10 +1184,10 @@ }, "Sender":{ "shape":"Address", - "documentation":"

    The email address of the sender of the bounced email. This is the address from which the bounce message will be sent.

    " + "documentation":"

    The email address of the sender of the bounced email. This is the address from which the bounce message is sent.

    " } }, - "documentation":"

    When included in a receipt rule, this action rejects the received email by returning a bounce response to the sender and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For information about sending a bounce message in response to a received email, see the Amazon SES Developer Guide.

    " + "documentation":"

    When included in a receipt rule, this action rejects the received email by returning a bounce response to the sender and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For information about sending a bounce message in response to a received email, see the Amazon SES Developer Guide.

    " }, "BounceMessage":{"type":"string"}, "BounceSmtpReplyCode":{"type":"string"}, @@ -1213,7 +1213,7 @@ }, "RecipientArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to receive email for the recipient of the bounced email. For more information about sending authorization, see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to receive email for the recipient of the bounced email. For more information about sending authorization, see the Amazon SES Developer Guide.

    " }, "BounceType":{ "shape":"BounceType", @@ -1224,7 +1224,7 @@ "documentation":"

    Recipient-related DSN fields, most of which would normally be filled in automatically when provided with a BounceType. You must provide either this parameter or BounceType.

    " } }, - "documentation":"

    Recipient-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    " + "documentation":"

    Recipient-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    " }, "BouncedRecipientInfoList":{ "type":"list", @@ -1255,7 +1255,7 @@ "members":{ "Status":{ "shape":"BulkEmailStatus", - "documentation":"

    The status of a message sent using the SendBulkTemplatedEmail operation.

    Possible values for this parameter include:

    • Success: Amazon SES accepted the message, and will attempt to deliver it to the recipients.

    • MessageRejected: The message was rejected because it contained a virus.

    • MailFromDomainNotVerified: The sender's email address or domain was not verified.

    • ConfigurationSetDoesNotExist: The configuration set you specified does not exist.

    • TemplateDoesNotExist: The template you specified does not exist.

    • AccountSuspended: Your account has been shut down because of issues related to your email sending practices.

    • AccountThrottled: The number of emails you can send has been reduced because your account has exceeded its allocated sending limit.

    • AccountDailyQuotaExceeded: You have reached or exceeded the maximum number of emails you can send from your account in a 24-hour period.

    • InvalidSendingPoolName: The configuration set you specified refers to an IP pool that does not exist.

    • AccountSendingPaused: Email sending for the Amazon SES account was disabled using the UpdateAccountSendingEnabled operation.

    • ConfigurationSetSendingPaused: Email sending for this configuration set was disabled using the UpdateConfigurationSetSendingEnabled operation.

    • InvalidParameterValue: One or more of the parameters you specified when calling this operation was invalid. See the error message for additional information.

    • TransientFailure: Amazon SES was unable to process your request because of a temporary issue.

    • Failed: Amazon SES was unable to process your request. See the error message for additional information.

    " + "documentation":"

    The status of a message sent using the SendBulkTemplatedEmail operation.

    Possible values for this parameter include:

    • Success: Amazon SES accepted the message, and attempts to deliver it to the recipients.

    • MessageRejected: The message was rejected because it contained a virus.

    • MailFromDomainNotVerified: The sender's email address or domain was not verified.

    • ConfigurationSetDoesNotExist: The configuration set you specified does not exist.

    • TemplateDoesNotExist: The template you specified does not exist.

    • AccountSuspended: Your account has been shut down because of issues related to your email sending practices.

    • AccountThrottled: The number of emails you can send has been reduced because your account has exceeded its allocated sending limit.

    • AccountDailyQuotaExceeded: You have reached or exceeded the maximum number of emails you can send from your account in a 24-hour period.

    • InvalidSendingPoolName: The configuration set you specified refers to an IP pool that does not exist.

    • AccountSendingPaused: Email sending for the Amazon SES account was disabled using the UpdateAccountSendingEnabled operation.

    • ConfigurationSetSendingPaused: Email sending for this configuration set was disabled using the UpdateConfigurationSetSendingEnabled operation.

    • InvalidParameterValue: One or more of the parameters you specified when calling this operation was invalid. See the error message for additional information.

    • TransientFailure: Amazon SES was unable to process your request because of a temporary issue.

    • Failed: Amazon SES was unable to process your request. See the error message for additional information.

    " }, "Error":{ "shape":"Error", @@ -1318,14 +1318,14 @@ "members":{ "RuleSetName":{ "shape":"ReceiptRuleSetName", - "documentation":"

    The name of the rule set to create. The name must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain less than 64 characters.

    " + "documentation":"

    The name of the rule set to create. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain 64 characters or fewer.

    " }, "OriginalRuleSetName":{ "shape":"ReceiptRuleSetName", "documentation":"

    The name of the rule set to clone.

    " } }, - "documentation":"

    Represents a request to create a receipt rule set by cloning an existing one. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to create a receipt rule set by cloning an existing one. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "CloneReceiptRuleSetResponse":{ "type":"structure", @@ -1342,7 +1342,7 @@ "documentation":"

    A list of dimensions upon which to categorize your emails when you publish email sending events to Amazon CloudWatch.

    " } }, - "documentation":"

    Contains information associated with an Amazon CloudWatch event destination to which email sending events are published.

    Event destinations, such as Amazon CloudWatch, are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Contains information associated with an Amazon CloudWatch event destination to which email sending events are published.

    Event destinations, such as Amazon CloudWatch, are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "CloudWatchDimensionConfiguration":{ "type":"structure", @@ -1354,18 +1354,18 @@ "members":{ "DimensionName":{ "shape":"DimensionName", - "documentation":"

    The name of an Amazon CloudWatch dimension associated with an email sending metric. The name must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Contain less than 256 characters.

    " + "documentation":"

    The name of an Amazon CloudWatch dimension associated with an email sending metric. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), dashes (-), or colons (:).

    • Contain 256 characters or fewer.

    " }, "DimensionValueSource":{ "shape":"DimensionValueSource", - "documentation":"

    The place where Amazon SES finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon SES to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, choose messageTag. If you want Amazon SES to use your own email headers, choose emailHeader.

    " + "documentation":"

    The place where Amazon SES finds the value of a dimension to publish to Amazon CloudWatch. To use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, specify messageTag. To use your own email headers, specify emailHeader. To put a custom tag on any link included in your email, specify linkTag.

    " }, "DefaultDimensionValue":{ "shape":"DefaultDimensionValue", - "documentation":"

    The default value of the dimension that is published to Amazon CloudWatch if you do not provide the value of the dimension when you send an email. The default value must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Contain less than 256 characters.

    " + "documentation":"

    The default value of the dimension that is published to Amazon CloudWatch if you do not provide the value of the dimension when you send an email. The default value must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), dashes (-), at signs (@), or periods (.).

    • Contain 256 characters or fewer.

    " } }, - "documentation":"

    Contains the dimension configuration to use when you publish email sending events to Amazon CloudWatch.

    For information about publishing email sending events to Amazon CloudWatch, see the Amazon SES Developer Guide.

    " + "documentation":"

    Contains the dimension configuration to use when you publish email sending events to Amazon CloudWatch.

    For information about publishing email sending events to Amazon CloudWatch, see the Amazon SES Developer Guide.

    " }, "CloudWatchDimensionConfigurations":{ "type":"list", @@ -1380,7 +1380,7 @@ "documentation":"

    The name of the configuration set. The name must meet the following requirements:

    • Contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Contain 64 characters or fewer.

    " } }, - "documentation":"

    The name of the configuration set.

    Configuration sets let you create groups of rules that you can apply to the emails you send using Amazon SES. For more information about using configuration sets, see Using Amazon SES Configuration Sets in the Amazon SES Developer Guide.

    " + "documentation":"

    The name of the configuration set.

    Configuration sets let you create groups of rules that you can apply to the emails you send using Amazon SES. For more information about using configuration sets, see Using Amazon SES Configuration Sets in the Amazon SES Developer Guide.

    " }, "ConfigurationSetAlreadyExistsException":{ "type":"structure", @@ -1477,10 +1477,10 @@ }, "EventDestination":{ "shape":"EventDestination", - "documentation":"

    An object that describes the AWS service that email sending event information will be published to.

    " + "documentation":"

    An object that describes the Amazon Web Services service to which email sending event information is published.

    " } }, - "documentation":"

    Represents a request to create a configuration set event destination. A configuration set event destination, which can be either Amazon CloudWatch or Amazon Kinesis Firehose, describes an AWS service in which Amazon SES publishes the email sending events associated with a configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to create a configuration set event destination. A configuration set event destination, which can be either Amazon CloudWatch or Amazon Kinesis Firehose, describes an Amazon Web Services service in which Amazon SES publishes the email sending events associated with a configuration set. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "CreateConfigurationSetEventDestinationResponse":{ "type":"structure", @@ -1497,7 +1497,7 @@ "documentation":"

    A data structure that contains the name of the configuration set.

    " } }, - "documentation":"

    Represents a request to create a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to create a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "CreateConfigurationSetResponse":{ "type":"structure", @@ -1551,7 +1551,7 @@ }, "TemplateContent":{ "shape":"TemplateContent", - "documentation":"

    The content of the custom verification email. The total size of the email must be less than 10 MB. The message body may contain HTML, with some limitations. For more information, see Custom Verification Email Frequently Asked Questions in the Amazon SES Developer Guide.

    " + "documentation":"

    The content of the custom verification email. The total size of the email must be less than 10 MB. The message body may contain HTML, with some limitations. For more information, see Custom Verification Email Frequently Asked Questions in the Amazon SES Developer Guide.

    " }, "SuccessRedirectionURL":{ "shape":"SuccessRedirectionURL", @@ -1573,7 +1573,7 @@ "documentation":"

    A data structure that describes the IP address filter to create, which consists of a name, an IP address range, and whether to allow or block mail from it.

    " } }, - "documentation":"

    Represents a request to create a new IP address filter. You use IP address filters when you receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to create a new IP address filter. You use IP address filters when you receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "CreateReceiptFilterResponse":{ "type":"structure", @@ -1590,18 +1590,18 @@ "members":{ "RuleSetName":{ "shape":"ReceiptRuleSetName", - "documentation":"

    The name of the rule set that the receipt rule will be added to.

    " + "documentation":"

    The name of the rule set where the receipt rule is added.

    " }, "After":{ "shape":"ReceiptRuleName", - "documentation":"

    The name of an existing rule after which the new rule will be placed. If this parameter is null, the new rule will be inserted at the beginning of the rule list.

    " + "documentation":"

    The name of an existing rule after which the new rule is placed. If this parameter is null, the new rule is inserted at the beginning of the rule list.

    " }, "Rule":{ "shape":"ReceiptRule", "documentation":"

    A data structure that contains the specified rule's name, actions, recipients, domains, enabled status, scan status, and TLS policy.

    " } }, - "documentation":"

    Represents a request to create a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to create a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "CreateReceiptRuleResponse":{ "type":"structure", @@ -1615,10 +1615,10 @@ "members":{ "RuleSetName":{ "shape":"ReceiptRuleSetName", - "documentation":"

    The name of the rule set to create. The name must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain less than 64 characters.

    " + "documentation":"

    The name of the rule set to create. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain 64 characters or fewer.

    " } }, - "documentation":"

    Represents a request to create an empty receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to create an empty receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "CreateReceiptRuleSetResponse":{ "type":"structure", @@ -1632,10 +1632,10 @@ "members":{ "Template":{ "shape":"Template", - "documentation":"

    The content of the email, composed of a subject line, an HTML part, and a text-only part.

    " + "documentation":"

    The content of the email, composed of a subject line and either an HTML part or a text-only part.

    " } }, - "documentation":"

    Represents a request to create an email template. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to create an email template. For more information, see the Amazon SES Developer Guide.

    " }, "CreateTemplateResponse":{ "type":"structure", @@ -1743,7 +1743,7 @@ "documentation":"

    The name of the event destination to delete.

    " } }, - "documentation":"

    Represents a request to delete a configuration set event destination. Configuration set event destinations are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to delete a configuration set event destination. Configuration set event destinations are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "DeleteConfigurationSetEventDestinationResponse":{ "type":"structure", @@ -1760,7 +1760,7 @@ "documentation":"

    The name of the configuration set to delete.

    " } }, - "documentation":"

    Represents a request to delete a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to delete a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "DeleteConfigurationSetResponse":{ "type":"structure", @@ -1774,7 +1774,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

    The name of the configuration set from which you want to delete the tracking options.

    " + "documentation":"

    The name of the configuration set.

    " } }, "documentation":"

    Represents a request to delete open and click tracking options in a configuration set.

    " @@ -1791,7 +1791,7 @@ "members":{ "TemplateName":{ "shape":"TemplateName", - "documentation":"

    The name of the custom verification email template that you want to delete.

    " + "documentation":"

    The name of the custom verification email template to delete.

    " } }, "documentation":"

    Represents a request to delete an existing custom verification email template.

    " @@ -1805,14 +1805,14 @@ "members":{ "Identity":{ "shape":"Identity", - "documentation":"

    The identity that is associated with the policy that you want to delete. You can specify the identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    " + "documentation":"

    The identity that is associated with the policy to delete. You can specify the identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this operation, you must own the identity.

    " }, "PolicyName":{ "shape":"PolicyName", "documentation":"

    The name of the policy to be deleted.

    " } }, - "documentation":"

    Represents a request to delete a sending authorization policy for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to delete a sending authorization policy for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " }, "DeleteIdentityPolicyResponse":{ "type":"structure", @@ -1826,7 +1826,7 @@ "members":{ "Identity":{ "shape":"Identity", - "documentation":"

    The identity to be removed from the list of identities for the AWS Account.

    " + "documentation":"

    The identity to be removed from the list of identities for the Amazon Web Services account.

    " } }, "documentation":"

    Represents a request to delete one of your Amazon SES identities (an email address or domain).

    " @@ -1846,7 +1846,7 @@ "documentation":"

    The name of the IP address filter to delete.

    " } }, - "documentation":"

    Represents a request to delete an IP address filter. You use IP address filters when you receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to delete an IP address filter. You use IP address filters when you receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "DeleteReceiptFilterResponse":{ "type":"structure", @@ -1870,7 +1870,7 @@ "documentation":"

    The name of the receipt rule to delete.

    " } }, - "documentation":"

    Represents a request to delete a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to delete a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "DeleteReceiptRuleResponse":{ "type":"structure", @@ -1887,7 +1887,7 @@ "documentation":"

    The name of the receipt rule set to delete.

    " } }, - "documentation":"

    Represents a request to delete a receipt rule set and all of the receipt rules it contains. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to delete a receipt rule set and all of the receipt rules it contains. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "DeleteReceiptRuleSetResponse":{ "type":"structure", @@ -1904,7 +1904,7 @@ "documentation":"

    The name of the template to be deleted.

    " } }, - "documentation":"

    Represents a request to delete an email template. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to delete an email template. For more information, see the Amazon SES Developer Guide.

    " }, "DeleteTemplateResponse":{ "type":"structure", @@ -1920,7 +1920,7 @@ "documentation":"

    An email address to be removed from the list of verified addresses.

    " } }, - "documentation":"

    Represents a request to delete an email address from the list of email addresses you have attempted to verify under your AWS account.

    " + "documentation":"

    Represents a request to delete an email address from the list of email addresses you have attempted to verify under your Amazon Web Services account.

    " }, "DeliveryOptions":{ "type":"structure", @@ -1936,7 +1936,7 @@ "type":"structure", "members":{ }, - "documentation":"

    Represents a request to return the metadata and receipt rules for the receipt rule set that is currently active. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to return the metadata and receipt rules for the receipt rule set that is currently active. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "DescribeActiveReceiptRuleSetResponse":{ "type":"structure", @@ -1965,7 +1965,7 @@ "documentation":"

    A list of configuration set attributes to return.

    " } }, - "documentation":"

    Represents a request to return the details of a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to return the details of a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "DescribeConfigurationSetResponse":{ "type":"structure", @@ -1988,7 +1988,7 @@ "documentation":"

    An object that represents the reputation settings for the configuration set.

    " } }, - "documentation":"

    Represents the details of a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents the details of a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "DescribeReceiptRuleRequest":{ "type":"structure", @@ -2006,7 +2006,7 @@ "documentation":"

    The name of the receipt rule.

    " } }, - "documentation":"

    Represents a request to return the details of a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to return the details of a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "DescribeReceiptRuleResponse":{ "type":"structure", @@ -2027,7 +2027,7 @@ "documentation":"

    The name of the receipt rule set to describe.

    " } }, - "documentation":"

    Represents a request to return the details of a receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to return the details of a receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "DescribeReceiptRuleSetResponse":{ "type":"structure", @@ -2059,7 +2059,7 @@ "documentation":"

    The recipients to place on the BCC: line of the message.

    " } }, - "documentation":"

    Represents the destination of the message, consisting of To:, CC:, and BCC: fields.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492.

    " + "documentation":"

    Represents the destination of the message, consisting of To:, CC:, and BCC: fields.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the email address string must be 7-bit ASCII. If you want to send to or from email addresses that contain Unicode characters in the domain part of an address, you must encode the domain using Punycode. Punycode is not permitted in the local part of the email address (the part before the @ sign) nor in the \"friendly from\" name. If you want to use Unicode characters in the \"friendly from\" name, you must encode the \"friendly from\" name using MIME encoded-word syntax, as described in Sending raw email using the Amazon SES API. For more information about Punycode, see RFC 3492.

    " }, "DiagnosticCode":{"type":"string"}, "DimensionName":{"type":"string"}, @@ -2099,7 +2099,7 @@ "members":{ "Name":{ "shape":"EventDestinationName", - "documentation":"

    The name of the event destination. The name must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Contain less than 64 characters.

    " + "documentation":"

    The name of the event destination. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Contain 64 characters or fewer.

    " }, "Enabled":{ "shape":"Enabled", @@ -2107,7 +2107,7 @@ }, "MatchingEventTypes":{ "shape":"EventTypes", - "documentation":"

    The type of email sending events to publish to the event destination.

    " + "documentation":"

    The type of email sending events to publish to the event destination.

    • send - The call was successful and Amazon SES is attempting to deliver the email.

    • reject - Amazon SES determined that the email contained a virus and rejected it.

    • bounce - The recipient's mail server permanently rejected the email. This corresponds to a hard bounce.

    • complaint - The recipient marked the email as spam.

    • delivery - Amazon SES successfully delivered the email to the recipient's mail server.

    • open - The recipient received the email and opened it in their email client.

    • click - The recipient clicked one or more links in the email.

    • renderingFailure - Amazon SES did not send the email because of a template rendering issue.

    " }, "KinesisFirehoseDestination":{ "shape":"KinesisFirehoseDestination", @@ -2122,7 +2122,7 @@ "documentation":"

    An object that contains the topic ARN associated with an Amazon Simple Notification Service (Amazon SNS) event destination.

    " } }, - "documentation":"

    Contains information about the event destination that the specified email sending events will be published to.

    When you create or update an event destination, you must provide one, and only one, destination. The destination can be Amazon CloudWatch, Amazon Kinesis Firehose or Amazon Simple Notification Service (Amazon SNS).

    Event destinations are associated with configuration sets, which enable you to publish email sending events to Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS). For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Contains information about an event destination.

    When you create or update an event destination, you must provide one, and only one, destination. The destination can be Amazon CloudWatch, Amazon Kinesis Firehose or Amazon Simple Notification Service (Amazon SNS).

    Event destinations are associated with configuration sets, which enable you to publish email sending events to Amazon CloudWatch, Amazon Kinesis Firehose, or Amazon Simple Notification Service (Amazon SNS). For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "EventDestinationAlreadyExistsException":{ "type":"structure", @@ -2200,10 +2200,10 @@ }, "Value":{ "shape":"ExtensionFieldValue", - "documentation":"

    The value of the header to add. Must be less than 2048 characters, and must not contain newline characters (\"\\r\" or \"\\n\").

    " + "documentation":"

    The value of the header to add. Must contain 2048 characters or fewer, and must not contain newline characters (\"\\r\" or \"\\n\").

    " } }, - "documentation":"

    Additional X-headers to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    " + "documentation":"

    Additional X-headers to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    " }, "ExtensionFieldList":{ "type":"list", @@ -2234,10 +2234,10 @@ "members":{ "Enabled":{ "shape":"Enabled", - "documentation":"

    Describes whether email sending is enabled or disabled for your Amazon SES account in the current AWS Region.

    " + "documentation":"

    Describes whether email sending is enabled or disabled for your Amazon SES account in the current Amazon Web Services Region.

    " } }, - "documentation":"

    Represents a request to return the email sending status for your Amazon SES account in the current AWS Region.

    " + "documentation":"

    Represents a request to return the email sending status for your Amazon SES account in the current Amazon Web Services Region.

    " }, "GetCustomVerificationEmailTemplateRequest":{ "type":"structure", @@ -2245,7 +2245,7 @@ "members":{ "TemplateName":{ "shape":"TemplateName", - "documentation":"

    The name of the custom verification email template that you want to retrieve.

    " + "documentation":"

    The name of the custom verification email template to retrieve.

    " } }, "documentation":"

    Represents a request to retrieve an existing custom verification email template.

    " @@ -2289,7 +2289,7 @@ "documentation":"

    A list of one or more verified identities - email addresses, domains, or both.

    " } }, - "documentation":"

    Represents a request for the status of Amazon SES Easy DKIM signing for an identity. For domain identities, this request also returns the DKIM tokens that are required for Easy DKIM signing, and whether Amazon SES successfully verified that these tokens were published. For more information about Easy DKIM, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request for the status of Amazon SES Easy DKIM signing for an identity. For domain identities, this request also returns the DKIM tokens that are required for Easy DKIM signing, and whether Amazon SES successfully verified that these tokens were published. For more information about Easy DKIM, see the Amazon SES Developer Guide.

    " }, "GetIdentityDkimAttributesResponse":{ "type":"structure", @@ -2311,7 +2311,7 @@ "documentation":"

    A list of one or more identities.

    " } }, - "documentation":"

    Represents a request to return the Amazon SES custom MAIL FROM attributes for a list of identities. For information about using a custom MAIL FROM domain, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to return the Amazon SES custom MAIL FROM attributes for a list of identities. For information about using a custom MAIL FROM domain, see the Amazon SES Developer Guide.

    " }, "GetIdentityMailFromDomainAttributesResponse":{ "type":"structure", @@ -2333,7 +2333,7 @@ "documentation":"

    A list of one or more identities. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    " } }, - "documentation":"

    Represents a request to return the notification attributes for a list of identities you verified with Amazon SES. For information about Amazon SES notifications, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to return the notification attributes for a list of identities you verified with Amazon SES. For information about Amazon SES notifications, see the Amazon SES Developer Guide.

    " }, "GetIdentityNotificationAttributesResponse":{ "type":"structure", @@ -2355,14 +2355,14 @@ "members":{ "Identity":{ "shape":"Identity", - "documentation":"

    The identity for which the policies will be retrieved. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    " + "documentation":"

    The identity for which the policies are retrieved. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this operation, you must own the identity.

    " }, "PolicyNames":{ "shape":"PolicyNameList", "documentation":"

    A list of the names of policies to be retrieved. You can retrieve a maximum of 20 policies at a time. If you do not know the names of the policies that are attached to the identity, you can use ListIdentityPolicies.

    " } }, - "documentation":"

    Represents a request to return the requested sending authorization policies for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to return the requested sending authorization policies for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " }, "GetIdentityPoliciesResponse":{ "type":"structure", @@ -2384,7 +2384,7 @@ "documentation":"

    A list of identities.

    " } }, - "documentation":"

    Represents a request to return the Amazon SES verification status of a list of identities. For domain identities, this request also returns the verification token. For information about verifying identities with Amazon SES, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to return the Amazon SES verification status of a list of identities. For domain identities, this request also returns the verification token. For information about verifying identities with Amazon SES, see the Amazon SES Developer Guide.

    " }, "GetIdentityVerificationAttributesResponse":{ "type":"structure", @@ -2431,7 +2431,7 @@ "members":{ "TemplateName":{ "shape":"TemplateName", - "documentation":"

    The name of the template you want to retrieve.

    " + "documentation":"

    The name of the template to retrieve.

    " } } }, @@ -2462,7 +2462,7 @@ }, "DkimTokens":{ "shape":"VerificationTokenList", - "documentation":"

    A set of character strings that represent the domain's identity. Using these tokens, you need to create DNS CNAME records that point to DKIM public keys that are hosted by Amazon SES. Amazon Web Services eventually detects that you've updated your DNS records. This detection process might take up to 72 hours. After successful detection, Amazon SES is able to DKIM-sign email originating from that domain. (This only applies to domain identities, not email address identities.)

    For more information about creating DNS records using DKIM tokens, see the Amazon SES Developer Guide.

    " + "documentation":"

    A set of character strings that represent the domain's identity. Using these tokens, you need to create DNS CNAME records that point to DKIM public keys that are hosted by Amazon SES. Amazon Web Services eventually detects that you've updated your DNS records. This detection process might take up to 72 hours. After successful detection, Amazon SES is able to DKIM-sign email originating from that domain. (This only applies to domain identities, not email address identities.)

    For more information about creating DNS records using DKIM tokens, see the Amazon SES Developer Guide.

    " } }, "documentation":"

    Represents the DKIM attributes of a verified email address or a domain.

    " @@ -2505,31 +2505,31 @@ "members":{ "BounceTopic":{ "shape":"NotificationTopic", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will publish bounce notifications.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES publishes bounce notifications.

    " }, "ComplaintTopic":{ "shape":"NotificationTopic", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will publish complaint notifications.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES publishes complaint notifications.

    " }, "DeliveryTopic":{ "shape":"NotificationTopic", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will publish delivery notifications.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES publishes delivery notifications.

    " }, "ForwardingEnabled":{ "shape":"Enabled", - "documentation":"

    Describes whether Amazon SES will forward bounce and complaint notifications as email. true indicates that Amazon SES will forward bounce and complaint notifications as email, while false indicates that bounce and complaint notifications will be published only to the specified bounce and complaint Amazon SNS topics.

    " + "documentation":"

    Describes whether Amazon SES forwards bounce and complaint notifications as email. true indicates that Amazon SES forwards bounce and complaint notifications as email, while false indicates that bounce and complaint notifications are published only to the specified bounce and complaint Amazon SNS topics.

    " }, "HeadersInBounceNotificationsEnabled":{ "shape":"Enabled", - "documentation":"

    Describes whether Amazon SES includes the original email headers in Amazon SNS notifications of type Bounce. A value of true specifies that Amazon SES will include headers in bounce notifications, and a value of false specifies that Amazon SES will not include headers in bounce notifications.

    " + "documentation":"

    Describes whether Amazon SES includes the original email headers in Amazon SNS notifications of type Bounce. A value of true specifies that Amazon SES includes headers in bounce notifications, and a value of false specifies that Amazon SES does not include headers in bounce notifications.

    " }, "HeadersInComplaintNotificationsEnabled":{ "shape":"Enabled", - "documentation":"

    Describes whether Amazon SES includes the original email headers in Amazon SNS notifications of type Complaint. A value of true specifies that Amazon SES will include headers in complaint notifications, and a value of false specifies that Amazon SES will not include headers in complaint notifications.

    " + "documentation":"

    Describes whether Amazon SES includes the original email headers in Amazon SNS notifications of type Complaint. A value of true specifies that Amazon SES includes headers in complaint notifications, and a value of false specifies that Amazon SES does not include headers in complaint notifications.

    " }, "HeadersInDeliveryNotificationsEnabled":{ "shape":"Enabled", - "documentation":"

    Describes whether Amazon SES includes the original email headers in Amazon SNS notifications of type Delivery. A value of true specifies that Amazon SES will include headers in delivery notifications, and a value of false specifies that Amazon SES will not include headers in delivery notifications.

    " + "documentation":"

    Describes whether Amazon SES includes the original email headers in Amazon SNS notifications of type Delivery. A value of true specifies that Amazon SES includes headers in delivery notifications, and a value of false specifies that Amazon SES does not include headers in delivery notifications.

    " } }, "documentation":"

    Represents the notification attributes of an identity, including whether an identity has Amazon Simple Notification Service (Amazon SNS) topics set for bounce, complaint, and/or delivery notifications, and whether feedback forwarding is enabled for bounce and complaint notifications.

    " @@ -2628,7 +2628,7 @@ "documentation":"

    Indicates that the ARN of the function was not found.

    " } }, - "documentation":"

    Indicates that the provided AWS Lambda function is invalid, or that Amazon SES could not execute the provided function, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", + "documentation":"

    Indicates that the provided Amazon Web Services Lambda function is invalid, or that Amazon SES could not execute the provided function, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", "error":{ "code":"InvalidLambdaFunction", "httpStatusCode":400, @@ -2669,7 +2669,7 @@ "documentation":"

    Indicated that the S3 Bucket was not found.

    " } }, - "documentation":"

    Indicates that the provided Amazon S3 bucket or AWS KMS encryption key is invalid, or that Amazon SES could not publish to the bucket, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", + "documentation":"

    Indicates that the provided Amazon S3 bucket or Amazon Web Services KMS encryption key is invalid, or that Amazon SES could not publish to the bucket, possibly due to permissions issues. For information about giving permissions, see the Amazon SES Developer Guide.

    ", "error":{ "code":"InvalidS3Configuration", "httpStatusCode":400, @@ -2761,7 +2761,7 @@ "documentation":"

    The ARN of the Amazon Kinesis Firehose stream that email sending events should be published to.

    " } }, - "documentation":"

    Contains the delivery stream ARN and the IAM role ARN associated with an Amazon Kinesis Firehose event destination.

    Event destinations, such as Amazon Kinesis Firehose, are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Contains the delivery stream ARN and the IAM role ARN associated with an Amazon Kinesis Firehose event destination.

    Event destinations, such as Amazon Kinesis Firehose, are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "LambdaAction":{ "type":"structure", @@ -2769,18 +2769,18 @@ "members":{ "TopicArn":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the Lambda action is executed. You can find the ARN of a topic by using the ListTopics operation in Amazon SNS.

    For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " }, "FunctionArn":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) of the AWS Lambda function. An example of an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction. For more information about AWS Lambda, see the AWS Lambda Developer Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Web Services Lambda function. An example of an Amazon Web Services Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction. For more information about Amazon Web Services Lambda, see the Amazon Web Services Lambda Developer Guide.

    " }, "InvocationType":{ "shape":"InvocationType", - "documentation":"

    The invocation type of the AWS Lambda function. An invocation type of RequestResponse means that the execution of the function will immediately result in a response, and a value of Event means that the function will be invoked asynchronously. The default value is Event. For information about AWS Lambda invocation types, see the AWS Lambda Developer Guide.

    There is a 30-second timeout on RequestResponse invocations. You should use Event invocation in most cases. Use RequestResponse only when you want to make a mail flow decision, such as whether to stop the receipt rule or the receipt rule set.

    " + "documentation":"

    The invocation type of the Amazon Web Services Lambda function. An invocation type of RequestResponse means that the execution of the function immediately results in a response, and a value of Event means that the function is invoked asynchronously. The default value is Event. For information about Amazon Web Services Lambda invocation types, see the Amazon Web Services Lambda Developer Guide.

    There is a 30-second timeout on RequestResponse invocations. You should use Event invocation in most cases. Use RequestResponse only to make a mail flow decision, such as whether to stop the receipt rule or the receipt rule set.

    " } }, - "documentation":"

    When included in a receipt rule, this action calls an AWS Lambda function and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    To enable Amazon SES to call your AWS Lambda function or to publish to an Amazon SNS topic of another account, Amazon SES must have permission to access those resources. For information about giving permissions, see the Amazon SES Developer Guide.

    For information about using AWS Lambda actions in receipt rules, see the Amazon SES Developer Guide.

    " + "documentation":"

    When included in a receipt rule, this action calls an Amazon Web Services Lambda function and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    To enable Amazon SES to call your Amazon Web Services Lambda function or to publish to an Amazon SNS topic of another account, Amazon SES must have permission to access those resources. For information about giving permissions, see the Amazon SES Developer Guide.

    For information about using Amazon Web Services Lambda actions in receipt rules, see the Amazon SES Developer Guide.

    " }, "LastAttemptDate":{"type":"timestamp"}, "LastFreshStart":{"type":"timestamp"}, @@ -2808,7 +2808,7 @@ "documentation":"

    The number of configuration sets to return.

    " } }, - "documentation":"

    Represents a request to list the configuration sets associated with your AWS account. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to list the configuration sets associated with your Amazon Web Services account. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "ListConfigurationSetsResponse":{ "type":"structure", @@ -2822,7 +2822,7 @@ "documentation":"

    A token indicating that there are additional configuration sets available to be listed. Pass this token to successive calls of ListConfigurationSets.

    " } }, - "documentation":"

    A list of configuration sets associated with your AWS account. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    A list of configuration sets associated with your Amazon Web Services account. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "ListCustomVerificationEmailTemplatesRequest":{ "type":"structure", @@ -2833,10 +2833,10 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of custom verification email templates to return. This value must be at least 1 and less than or equal to 50. If you do not specify a value, or if you specify a value less than 1 or greater than 50, the operation will return up to 50 results.

    " + "documentation":"

    The maximum number of custom verification email templates to return. This value must be at least 1 and less than or equal to 50. If you do not specify a value, or if you specify a value less than 1 or greater than 50, the operation returns up to 50 results.

    " } }, - "documentation":"

    Represents a request to list the existing custom verification email templates for your account.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to list the existing custom verification email templates for your account.

    For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

    " }, "ListCustomVerificationEmailTemplatesResponse":{ "type":"structure", @@ -2857,7 +2857,7 @@ "members":{ "IdentityType":{ "shape":"IdentityType", - "documentation":"

    The type of the identities to list. Possible values are \"EmailAddress\" and \"Domain\". If this parameter is omitted, then all identities will be listed.

    " + "documentation":"

    The type of the identities to list. Possible values are \"EmailAddress\" and \"Domain\". If this parameter is omitted, then all identities are listed.

    " }, "NextToken":{ "shape":"NextToken", @@ -2868,7 +2868,7 @@ "documentation":"

    The maximum number of identities per page. Possible values are 1-1000 inclusive.

    " } }, - "documentation":"

    Represents a request to return a list of all identities (email addresses and domains) that you have attempted to verify under your AWS account, regardless of verification status.

    " + "documentation":"

    Represents a request to return a list of all identities (email addresses and domains) that you have attempted to verify under your Amazon Web Services account, regardless of verification status.

    " }, "ListIdentitiesResponse":{ "type":"structure", @@ -2883,7 +2883,7 @@ "documentation":"

    The token used for pagination.

    " } }, - "documentation":"

    A list of all identities that you have attempted to verify under your AWS account, regardless of verification status.

    " + "documentation":"

    A list of all identities that you have attempted to verify under your Amazon Web Services account, regardless of verification status.

    " }, "ListIdentityPoliciesRequest":{ "type":"structure", @@ -2891,10 +2891,10 @@ "members":{ "Identity":{ "shape":"Identity", - "documentation":"

    The identity that is associated with the policy for which the policies will be listed. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    " + "documentation":"

    The identity that is associated with the policy for which the policies are listed. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this operation, you must own the identity.

    " } }, - "documentation":"

    Represents a request to return a list of sending authorization policies that are attached to an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to return a list of sending authorization policies that are attached to an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " }, "ListIdentityPoliciesResponse":{ "type":"structure", @@ -2911,7 +2911,7 @@ "type":"structure", "members":{ }, - "documentation":"

    Represents a request to list the IP address filters that exist under your AWS account. You use IP address filters when you receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to list the IP address filters that exist under your Amazon Web Services account. You use IP address filters when you receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "ListReceiptFiltersResponse":{ "type":"structure", @@ -2921,7 +2921,7 @@ "documentation":"

    A list of IP address filter data structures, which each consist of a name, an IP address range, and whether to allow or block mail from it.

    " } }, - "documentation":"

    A list of IP address filters that exist under your AWS account.

    " + "documentation":"

    A list of IP address filters that exist under your Amazon Web Services account.

    " }, "ListReceiptRuleSetsRequest":{ "type":"structure", @@ -2931,7 +2931,7 @@ "documentation":"

    A token returned from a previous call to ListReceiptRuleSets to indicate the position in the receipt rule set list.

    " } }, - "documentation":"

    Represents a request to list the receipt rule sets that exist under your AWS account. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to list the receipt rule sets that exist under your Amazon Web Services account. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "ListReceiptRuleSetsResponse":{ "type":"structure", @@ -2945,7 +2945,7 @@ "documentation":"

    A token indicating that there are additional receipt rule sets available to be listed. Pass this token to successive calls of ListReceiptRuleSets to retrieve up to 100 receipt rule sets at a time.

    " } }, - "documentation":"

    A list of receipt rule sets that exist under your AWS account.

    " + "documentation":"

    A list of receipt rule sets that exist under your Amazon Web Services account.

    " }, "ListTemplatesRequest":{ "type":"structure", @@ -2956,7 +2956,7 @@ }, "MaxItems":{ "shape":"MaxItems", - "documentation":"

    The maximum number of templates to return. This value must be at least 1 and less than or equal to 10. If you do not specify a value, or if you specify a value less than 1 or greater than 10, the operation will return up to 10 results.

    " + "documentation":"

    The maximum number of templates to return. This value must be at least 1 and less than or equal to 100. If more than 100 items are requested, the page size is automatically set to 100. If you do not specify a value, 10 is the default page size.

    " } } }, @@ -2969,7 +2969,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    A token indicating that there are additional email templates available to be listed. Pass this token to a subsequent call to ListTemplates to retrieve the next 50 email templates.

    " + "documentation":"

    A token indicating that there are additional email templates available to be listed. Pass this token to a subsequent call to ListTemplates to retrieve the next set of email templates within your page size.

    " } } }, @@ -2981,7 +2981,7 @@ "documentation":"

    A list of email addresses that have been verified.

    " } }, - "documentation":"

    A list of email addresses that you have verified with Amazon SES under your AWS account.

    " + "documentation":"

    A list of email addresses that you have verified with Amazon SES under your Amazon Web Services account.

    " }, "MailFromDomainAttributes":{ "type":"map", @@ -3019,7 +3019,7 @@ "members":{ "Subject":{ "shape":"Content", - "documentation":"

    The subject of the message: A short summary of the content, which will appear in the recipient's inbox.

    " + "documentation":"

    The subject of the message: A short summary of the content, which appears in the recipient's inbox.

    " }, "Body":{ "shape":"Body", @@ -3046,7 +3046,7 @@ "documentation":"

    Additional X-headers to include in the DSN.

    " } }, - "documentation":"

    Message-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    " + "documentation":"

    Message-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    " }, "MessageId":{"type":"string"}, "MessageRejected":{ @@ -3070,14 +3070,14 @@ "members":{ "Name":{ "shape":"MessageTagName", - "documentation":"

    The name of the tag. The name must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Contain less than 256 characters.

    " + "documentation":"

    The name of the tag. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Contain 256 characters or fewer.

    " }, "Value":{ "shape":"MessageTagValue", - "documentation":"

    The value of the tag. The value must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Contain less than 256 characters.

    " + "documentation":"

    The value of the tag. The value must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Contain 256 characters or fewer.

    " } }, - "documentation":"

    Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.

    Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.

    Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "MessageTagList":{ "type":"list", @@ -3149,7 +3149,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

    The name of the configuration set that you want to specify the delivery options for.

    " + "documentation":"

    The name of the configuration set.

    " }, "DeliveryOptions":{ "shape":"DeliveryOptions", @@ -3174,7 +3174,7 @@ "members":{ "Identity":{ "shape":"Identity", - "documentation":"

    The identity that the policy will apply to. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this API, you must own the identity.

    " + "documentation":"

    The identity to which the policy applies. You can specify an identity by using its name or by using its Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    To successfully call this operation, you must own the identity.

    " }, "PolicyName":{ "shape":"PolicyName", @@ -3182,10 +3182,10 @@ }, "Policy":{ "shape":"Policy", - "documentation":"

    The text of the policy in JSON format. The policy cannot exceed 4 KB.

    For information about the syntax of sending authorization policies, see the Amazon SES Developer Guide.

    " + "documentation":"

    The text of the policy in JSON format. The policy cannot exceed 4 KB.

    For information about the syntax of sending authorization policies, see the Amazon SES Developer Guide.

    " } }, - "documentation":"

    Represents a request to add or update a sending authorization policy for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to add or update a sending authorization policy for an identity. Sending authorization is an Amazon SES feature that enables you to authorize other senders to use your identities. For information, see the Amazon SES Developer Guide.

    " }, "PutIdentityPolicyResponse":{ "type":"structure", @@ -3199,7 +3199,7 @@ "members":{ "Data":{ "shape":"RawMessageData", - "documentation":"

    The raw data of the message. This data needs to base64-encoded if you are accessing Amazon SES directly through the HTTPS interface. If you are accessing Amazon SES using an AWS SDK, the SDK takes care of the base 64-encoding for you. In all cases, the client must ensure that the message format complies with Internet email standards regarding email header fields, MIME types, and MIME encoding.

    The To:, CC:, and BCC: headers in the raw message can contain a group list.

    If you are using SendRawEmail with sending authorization, you can include X-headers in the raw message to specify the \"Source,\" \"From,\" and \"Return-Path\" addresses. For more information, see the documentation for SendRawEmail.

    Do not include these X-headers in the DKIM signature, because they are removed by Amazon SES before sending the email.

    For more information, go to the Amazon SES Developer Guide.

    " + "documentation":"

    The raw data of the message. This data needs to be base64-encoded if you are accessing Amazon SES directly through the HTTPS interface. If you are accessing Amazon SES using an Amazon Web Services SDK, the SDK takes care of the base 64-encoding for you. In all cases, the client must ensure that the message format complies with Internet email standards regarding email header fields, MIME types, and MIME encoding.

    The To:, CC:, and BCC: headers in the raw message can contain a group list.

    If you are using SendRawEmail with sending authorization, you can include X-headers in the raw message to specify the \"Source,\" \"From,\" and \"Return-Path\" addresses. For more information, see the documentation for SendRawEmail.

    Do not include these X-headers in the DKIM signature, because they are removed by Amazon SES before sending the email.

    For more information, go to the Amazon SES Developer Guide.

    " } }, "documentation":"

    Represents the raw data of the message.

    " @@ -3222,7 +3222,7 @@ }, "LambdaAction":{ "shape":"LambdaAction", - "documentation":"

    Calls an AWS Lambda function, and optionally, publishes a notification to Amazon SNS.

    " + "documentation":"

    Calls an Amazon Web Services Lambda function, and optionally, publishes a notification to Amazon SNS.

    " }, "StopAction":{ "shape":"StopAction", @@ -3237,7 +3237,7 @@ "documentation":"

    Publishes the email content within a notification to Amazon SNS.

    " } }, - "documentation":"

    An action that Amazon SES can take when it receives an email on behalf of one or more email addresses or domains that you own. An instance of this data type can represent only one action.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    " + "documentation":"

    An action that Amazon SES can take when it receives an email on behalf of one or more email addresses or domains that you own. An instance of this data type can represent only one action.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    " }, "ReceiptActionsList":{ "type":"list", @@ -3252,14 +3252,14 @@ "members":{ "Name":{ "shape":"ReceiptFilterName", - "documentation":"

    The name of the IP address filter. The name must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain less than 64 characters.

    " + "documentation":"

    The name of the IP address filter. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain 64 characters or fewer.

    " }, "IpFilter":{ "shape":"ReceiptIpFilter", "documentation":"

    A structure that provides the IP addresses to block or allow, and whether to block or allow incoming mail from them.

    " } }, - "documentation":"

    A receipt IP address filter enables you to specify whether to accept or reject mail originating from an IP address or range of IP addresses.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    " + "documentation":"

    A receipt IP address filter enables you to specify whether to accept or reject mail originating from an IP address or range of IP addresses.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    " }, "ReceiptFilterList":{ "type":"list", @@ -3286,10 +3286,10 @@ }, "Cidr":{ "shape":"Cidr", - "documentation":"

    A single IP address or a range of IP addresses that you want to block or allow, specified in Classless Inter-Domain Routing (CIDR) notation. An example of a single email address is 10.0.0.1. An example of a range of IP addresses is 10.0.0.1/24. For more information about CIDR notation, see RFC 2317.

    " + "documentation":"

    A single IP address or a range of IP addresses to block or allow, specified in Classless Inter-Domain Routing (CIDR) notation. An example of a single email address is 10.0.0.1. An example of a range of IP addresses is 10.0.0.1/24. For more information about CIDR notation, see RFC 2317.

    " } }, - "documentation":"

    A receipt IP address filter enables you to specify whether to accept or reject mail originating from an IP address or range of IP addresses.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    " + "documentation":"

    A receipt IP address filter enables you to specify whether to accept or reject mail originating from an IP address or range of IP addresses.

    For information about setting up IP address filters, see the Amazon SES Developer Guide.

    " }, "ReceiptRule":{ "type":"structure", @@ -3297,7 +3297,7 @@ "members":{ "Name":{ "shape":"ReceiptRuleName", - "documentation":"

    The name of the receipt rule. The name must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain less than 64 characters.

    " + "documentation":"

    The name of the receipt rule. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), dashes (-), or periods (.).

    • Start and end with a letter or number.

    • Contain 64 characters or fewer.

    " }, "Enabled":{ "shape":"Enabled", @@ -3305,11 +3305,11 @@ }, "TlsPolicy":{ "shape":"TlsPolicy", - "documentation":"

    Specifies whether Amazon SES should require that incoming email is delivered over a connection encrypted with Transport Layer Security (TLS). If this parameter is set to Require, Amazon SES will bounce emails that are not received over TLS. The default is Optional.

    " + "documentation":"

    Specifies whether Amazon SES should require that incoming email is delivered over a connection encrypted with Transport Layer Security (TLS). If this parameter is set to Require, Amazon SES bounces emails that are not received over TLS. The default is Optional.

    " }, "Recipients":{ "shape":"RecipientsList", - "documentation":"

    The recipient domains and email addresses that the receipt rule applies to. If this field is not specified, this rule will match all recipients under all verified domains.

    " + "documentation":"

    The recipient domains and email addresses that the receipt rule applies to. If this field is not specified, this rule matches all recipients on all verified domains.

    " }, "Actions":{ "shape":"ReceiptActionsList", @@ -3320,7 +3320,7 @@ "documentation":"

    If true, then messages that this receipt rule applies to are scanned for spam and viruses. The default value is false.

    " } }, - "documentation":"

    Receipt rules enable you to specify which actions Amazon SES should take when it receives mail on behalf of one or more email addresses or domains that you own.

    Each receipt rule defines a set of email addresses or domains that it applies to. If the email addresses or domains match at least one recipient address of the message, Amazon SES executes all of the receipt rule's actions on the message.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    " + "documentation":"

    Receipt rules enable you to specify which actions Amazon SES should take when it receives mail on behalf of one or more email addresses or domains that you own.

    Each receipt rule defines a set of email addresses or domains that it applies to. If the email addresses or domains match at least one recipient address of the message, Amazon SES executes all of the receipt rule's actions on the message.

    For information about setting up receipt rules, see the Amazon SES Developer Guide.

    " }, "ReceiptRuleName":{"type":"string"}, "ReceiptRuleNamesList":{ @@ -3332,14 +3332,14 @@ "members":{ "Name":{ "shape":"ReceiptRuleSetName", - "documentation":"

    The name of the receipt rule set. The name must:

    • This value can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain less than 64 characters.

    " + "documentation":"

    The name of the receipt rule set. The name must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • Start and end with a letter or number.

    • Contain 64 characters or fewer.

    " }, "CreatedTimestamp":{ "shape":"Timestamp", "documentation":"

    The date and time the receipt rule set was created.

    " } }, - "documentation":"

    Information about a receipt rule set.

    A receipt rule set is a collection of rules that specify what Amazon SES should do with mail it receives on behalf of your account's verified domains.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Information about a receipt rule set.

    A receipt rule set is a collection of rules that specify what Amazon SES should do with mail it receives on behalf of your account's verified domains.

    For information about setting up receipt rule sets, see the Amazon SES Developer Guide.

    " }, "ReceiptRuleSetName":{"type":"string"}, "ReceiptRuleSetsLists":{ @@ -3360,7 +3360,7 @@ "members":{ "FinalRecipient":{ "shape":"Address", - "documentation":"

    The email address that the message was ultimately delivered to. This corresponds to the Final-Recipient in the DSN. If not specified, FinalRecipient will be set to the Recipient specified in the BouncedRecipientInfo structure. Either FinalRecipient or the recipient in BouncedRecipientInfo must be a recipient of the original bounced message.

    Do not prepend the FinalRecipient email address with rfc 822;, as described in RFC 3798.

    " + "documentation":"

    The email address that the message was ultimately delivered to. This corresponds to the Final-Recipient in the DSN. If not specified, FinalRecipient is set to the Recipient specified in the BouncedRecipientInfo structure. Either FinalRecipient or the recipient in BouncedRecipientInfo must be a recipient of the original bounced message.

    Do not prepend the FinalRecipient email address with rfc 822;, as described in RFC 3798.

    " }, "Action":{ "shape":"DsnAction", @@ -3387,7 +3387,7 @@ "documentation":"

    Additional X-headers to include in the DSN.

    " } }, - "documentation":"

    Recipient-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    " + "documentation":"

    Recipient-related information to include in the Delivery Status Notification (DSN) when an email that Amazon SES receives on your behalf bounces.

    For information about receiving email through Amazon SES, see the Amazon SES Developer Guide.

    " }, "RecipientsList":{ "type":"list", @@ -3408,10 +3408,10 @@ }, "RuleNames":{ "shape":"ReceiptRuleNamesList", - "documentation":"

    A list of the specified receipt rule set's receipt rules in the order that you want to put them.

    " + "documentation":"

    The specified receipt rule set's receipt rules, in order.

    " } }, - "documentation":"

    Represents a request to reorder the receipt rules within a receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to reorder the receipt rules within a receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "ReorderReceiptRuleSetResponse":{ "type":"structure", @@ -3425,7 +3425,7 @@ "members":{ "SendingEnabled":{ "shape":"Enabled", - "documentation":"

    Describes whether email sending is enabled or disabled for the configuration set. If the value is true, then Amazon SES will send emails that use the configuration set. If the value is false, Amazon SES will not send emails that use the configuration set. The default value is true. You can change this setting using UpdateConfigurationSetSendingEnabled.

    " + "documentation":"

    Describes whether email sending is enabled or disabled for the configuration set. If the value is true, then Amazon SES sends emails that use the configuration set. If the value is false, Amazon SES does not send emails that use the configuration set. The default value is true. You can change this setting using UpdateConfigurationSetSendingEnabled.

    " }, "ReputationMetricsEnabled":{ "shape":"Enabled", @@ -3477,11 +3477,11 @@ "members":{ "TopicArn":{ "shape":"AmazonResourceName", - "documentation":"

    The ARN of the Amazon SNS topic to notify when the message is saved to the Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " + "documentation":"

    The ARN of the Amazon SNS topic to notify when the message is saved to the Amazon S3 bucket. You can find the ARN of a topic by using the ListTopics operation in Amazon SNS.

    For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " }, "BucketName":{ "shape":"S3BucketName", - "documentation":"

    The name of the Amazon S3 bucket that incoming email will be saved to.

    " + "documentation":"

    The name of the Amazon S3 bucket for incoming email.

    " }, "ObjectKeyPrefix":{ "shape":"S3KeyPrefix", @@ -3489,10 +3489,10 @@ }, "KmsKeyArn":{ "shape":"AmazonResourceName", - "documentation":"

    The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key you created in AWS KMS as follows:

    • To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) region, the ARN of the default master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.

    • To use a custom master key you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the Amazon SES Developer Guide.

    For more information about key policies, see the AWS KMS Developer Guide. If you do not specify a master key, Amazon SES will not encrypt your emails.

    Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the AWS SDK for Java and AWS SDK for Ruby only. For more information about client-side encryption using AWS KMS master keys, see the Amazon S3 Developer Guide.

    " + "documentation":"

    The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key that you created in Amazon Web Services KMS as follows:

    • To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. For example, if your Amazon Web Services account ID is 123456789012 and you want to use the default master key in the US West (Oregon) Region, the ARN of the default master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.

    • To use a custom master key that you created in Amazon Web Services KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the Amazon SES Developer Guide.

    For more information about key policies, see the Amazon Web Services KMS Developer Guide. If you do not specify a master key, Amazon SES does not encrypt your emails.

    Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your Amazon Web Services KMS keys for decryption. This encryption client is currently available with the Amazon Web Services SDK for Java and Amazon Web Services SDK for Ruby only. For more information about client-side encryption using Amazon Web Services KMS master keys, see the Amazon S3 Developer Guide.

    " } }, - "documentation":"

    When included in a receipt rule, this action saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    To enable Amazon SES to write emails to your Amazon S3 bucket, use an AWS KMS key to encrypt your emails, or publish to an Amazon SNS topic of another account, Amazon SES must have permission to access those resources. For information about giving permissions, see the Amazon SES Developer Guide.

    When you save your emails to an Amazon S3 bucket, the maximum email size (including headers) is 30 MB. Emails larger than that will bounce.

    For information about specifying Amazon S3 actions in receipt rules, see the Amazon SES Developer Guide.

    " + "documentation":"

    When included in a receipt rule, this action saves the received message to an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For Amazon SES to write emails to your Amazon S3 bucket, to use an Amazon Web Services KMS key to encrypt your emails, or to publish to an Amazon SNS topic of another account, Amazon SES must have permission to access those resources. For information about granting permissions, see the Amazon SES Developer Guide.

    When you save your emails to an Amazon S3 bucket, the maximum email size (including headers) is 40 MB. Emails larger than that bounce.

    For information about specifying Amazon S3 actions in receipt rules, see the Amazon SES Developer Guide.

    " }, "S3BucketName":{"type":"string"}, "S3KeyPrefix":{"type":"string"}, @@ -3502,14 +3502,14 @@ "members":{ "TopicArn":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. You can find the ARN of a topic by using the ListTopics operation in Amazon SNS.

    For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " }, "Encoding":{ "shape":"SNSActionEncoding", "documentation":"

    The encoding to use for the email within the Amazon SNS notification. UTF-8 is easier to use, but may not preserve all special characters when a message was encoded with a different encoding format. Base64 preserves all special characters. The default value is UTF-8.

    " } }, - "documentation":"

    When included in a receipt rule, this action publishes a notification to Amazon Simple Notification Service (Amazon SNS). This action includes a complete copy of the email content in the Amazon SNS notifications. Amazon SNS notifications for all other actions simply provide information about the email. They do not include the email content itself.

    If you own the Amazon SNS topic, you don't need to do anything to give Amazon SES permission to publish emails to it. However, if you don't own the Amazon SNS topic, you need to attach a policy to the topic to give Amazon SES permissions to access it. For information about giving permissions, see the Amazon SES Developer Guide.

    You can only publish emails that are 150 KB or less (including the header) to Amazon SNS. Larger emails will bounce. If you anticipate emails larger than 150 KB, use the S3 action instead.

    For information about using a receipt rule to publish an Amazon SNS notification, see the Amazon SES Developer Guide.

    " + "documentation":"

    When included in a receipt rule, this action publishes a notification to Amazon Simple Notification Service (Amazon SNS). This action includes a complete copy of the email content in the Amazon SNS notifications. Amazon SNS notifications for all other actions simply provide information about the email. They do not include the email content itself.

    If you own the Amazon SNS topic, you don't need to do anything to give Amazon SES permission to publish emails to it. However, if you don't own the Amazon SNS topic, you need to attach a policy to the topic to give Amazon SES permissions to access it. For information about giving permissions, see the Amazon SES Developer Guide.

    You can only publish emails that are 150 KB or less (including the header) to Amazon SNS. Larger emails bounce. If you anticipate emails larger than 150 KB, use the S3 action instead.

    For information about using a receipt rule to publish an Amazon SNS notification, see the Amazon SES Developer Guide.

    " }, "SNSActionEncoding":{ "type":"string", @@ -3524,10 +3524,10 @@ "members":{ "TopicARN":{ "shape":"AmazonResourceName", - "documentation":"

    The ARN of the Amazon SNS topic that email sending events will be published to. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " + "documentation":"

    The ARN of the Amazon SNS topic for email sending events. You can find the ARN of a topic by using the ListTopics Amazon SNS operation.

    For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " } }, - "documentation":"

    Contains the topic ARN associated with an Amazon Simple Notification Service (Amazon SNS) event destination.

    Event destinations, such as Amazon SNS, are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Contains the topic ARN associated with an Amazon Simple Notification Service (Amazon SNS) event destination.

    Event destinations, such as Amazon SNS, are associated with configuration sets, which enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "SendBounceRequest":{ "type":"structure", @@ -3547,11 +3547,11 @@ }, "Explanation":{ "shape":"Explanation", - "documentation":"

    Human-readable text for the bounce message to explain the failure. If not specified, the text will be auto-generated based on the bounced recipient information.

    " + "documentation":"

    Human-readable text for the bounce message to explain the failure. If not specified, the text is auto-generated based on the bounced recipient information.

    " }, "MessageDsn":{ "shape":"MessageDsn", - "documentation":"

    Message-related DSN fields. If not specified, Amazon SES will choose the values.

    " + "documentation":"

    Message-related DSN fields. If not specified, Amazon SES chooses the values.

    " }, "BouncedRecipientInfoList":{ "shape":"BouncedRecipientInfoList", @@ -3559,7 +3559,7 @@ }, "BounceSenderArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the address in the \"From\" header of the bounce. For more information about sending authorization, see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the address in the \"From\" header of the bounce. For more information about sending authorization, see the Amazon SES Developer Guide.

    " } }, "documentation":"

    Represents a request to send a bounce message to the sender of an email you received through Amazon SES.

    " @@ -3584,23 +3584,23 @@ "members":{ "Source":{ "shape":"Address", - "documentation":"

    The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide.

    If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492. The sender name (also known as the friendly name) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=.

    " + "documentation":"

    The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide.

    If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the email address string must be 7-bit ASCII. If you want to send to or from email addresses that contain Unicode characters in the domain part of an address, you must encode the domain using Punycode. Punycode is not permitted in the local part of the email address (the part before the @ sign) nor in the \"friendly from\" name. If you want to use Unicode characters in the \"friendly from\" name, you must encode the \"friendly from\" name using MIME encoded-word syntax, as described in Sending raw email using the Amazon SES API. For more information about Punycode, see RFC 3492.

    " }, "SourceArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " }, "ReplyToAddresses":{ "shape":"AddressList", - "documentation":"

    The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.

    " + "documentation":"

    The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address receives the reply.

    " }, "ReturnPath":{ "shape":"Address", - "documentation":"

    The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.

    " + "documentation":"

    The email address that bounces and complaints are forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message is returned from the recipient's ISP; this message is forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.

    " }, "ReturnPathArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " }, "ConfigurationSetName":{ "shape":"ConfigurationSetName", @@ -3624,10 +3624,10 @@ }, "Destinations":{ "shape":"BulkEmailDestinationList", - "documentation":"

    One or more Destination objects. All of the recipients in a Destination will receive the same version of the email. You can specify up to 50 Destination objects within a Destinations array.

    " + "documentation":"

    One or more Destination objects. All of the recipients in a Destination receive the same version of the email. You can specify up to 50 Destination objects within a Destinations array.

    " } }, - "documentation":"

    Represents a request to send a templated email to multiple destinations using Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to send a templated email to multiple destinations using Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "SendBulkTemplatedEmailResponse":{ "type":"structure", @@ -3635,7 +3635,7 @@ "members":{ "Status":{ "shape":"BulkEmailDestinationStatusList", - "documentation":"

    The unique message identifier returned from the SendBulkTemplatedEmail action.

    " + "documentation":"

    One object per intended recipient. Check each response object and retry any messages with a failure status. (Note that the order of responses corresponds to the order of destinations in the request.)

    " } } }, @@ -3711,7 +3711,7 @@ "members":{ "Source":{ "shape":"Address", - "documentation":"

    The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide.

    If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492. The sender name (also known as the friendly name) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=.

    " + "documentation":"

    The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide.

    If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the email address string must be 7-bit ASCII. If you want to send to or from email addresses that contain Unicode characters in the domain part of an address, you must encode the domain using Punycode. Punycode is not permitted in the local part of the email address (the part before the @ sign) nor in the \"friendly from\" name. If you want to use Unicode characters in the \"friendly from\" name, you must encode the \"friendly from\" name using MIME encoded-word syntax, as described in Sending raw email using the Amazon SES API. For more information about Punycode, see RFC 3492.

    " }, "Destination":{ "shape":"Destination", @@ -3723,19 +3723,19 @@ }, "ReplyToAddresses":{ "shape":"AddressList", - "documentation":"

    The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.

    " + "documentation":"

    The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address receives the reply.

    " }, "ReturnPath":{ "shape":"Address", - "documentation":"

    The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.

    " + "documentation":"

    The email address that bounces and complaints are forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message is returned from the recipient's ISP; this message is forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.

    " }, "SourceArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " }, "ReturnPathArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " }, "Tags":{ "shape":"MessageTagList", @@ -3746,7 +3746,7 @@ "documentation":"

    The name of the configuration set to use when you send an email using SendEmail.

    " } }, - "documentation":"

    Represents a request to send a single formatted email using Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to send a single formatted email using Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "SendEmailResponse":{ "type":"structure", @@ -3765,7 +3765,7 @@ "members":{ "Source":{ "shape":"Address", - "documentation":"

    The identity's email address. If you do not provide a value for this parameter, you must specify a \"From\" address in the raw text of the message. (You can also specify both.)

    Amazon SES does not support the SMTPUTF8 extension, as described inRFC6531. For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492. The sender name (also known as the friendly name) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described in RFC 2047. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=.

    If you specify the Source parameter and have feedback forwarding enabled, then bounces and complaints will be sent to this email address. This takes precedence over any Return-Path header that you might include in the raw text of the message.

    " + "documentation":"

    The identity's email address. If you do not provide a value for this parameter, you must specify a \"From\" address in the raw text of the message. (You can also specify both.)

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the email address string must be 7-bit ASCII. If you want to send to or from email addresses that contain Unicode characters in the domain part of an address, you must encode the domain using Punycode. Punycode is not permitted in the local part of the email address (the part before the @ sign) nor in the \"friendly from\" name. If you want to use Unicode characters in the \"friendly from\" name, you must encode the \"friendly from\" name using MIME encoded-word syntax, as described in Sending raw email using the Amazon SES API. For more information about Punycode, see RFC 3492.

    If you specify the Source parameter and have feedback forwarding enabled, then bounces and complaints are sent to this email address. This takes precedence over any Return-Path header that you might include in the raw text of the message.

    " }, "Destinations":{ "shape":"AddressList", @@ -3773,19 +3773,19 @@ }, "RawMessage":{ "shape":"RawMessage", - "documentation":"

    The raw email message itself. The message has to meet the following criteria:

    • The message has to contain a header and a body, separated by a blank line.

    • All of the required header fields must be present in the message.

    • Each part of a multipart MIME message must be formatted properly.

    • Attachments must be of a content type that Amazon SES supports. For a list on unsupported content types, see Unsupported Attachment Types in the Amazon SES Developer Guide.

    • The entire message must be base64-encoded.

    • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, we highly recommend that you encode that content. For more information, see Sending Raw Email in the Amazon SES Developer Guide.

    • Per RFC 5321, the maximum length of each line of text, including the <CRLF>, must not exceed 1,000 characters.

    " + "documentation":"

    The raw email message itself. The message has to meet the following criteria:

    • The message has to contain a header and a body, separated by a blank line.

    • All of the required header fields must be present in the message.

    • Each part of a multipart MIME message must be formatted properly.

    • Attachments must be of a content type that Amazon SES supports. For a list on unsupported content types, see Unsupported Attachment Types in the Amazon SES Developer Guide.

    • The entire message must be base64-encoded.

    • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, we highly recommend that you encode that content. For more information, see Sending Raw Email in the Amazon SES Developer Guide.

    • Per RFC 5321, the maximum length of each line of text, including the <CRLF>, must not exceed 1,000 characters.

    " }, "FromArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to specify a particular \"From\" address in the header of the raw email.

    Instead of using this parameter, you can use the X-header X-SES-FROM-ARN in the raw message of the email. If you use both the FromArn parameter and the corresponding X-header, Amazon SES uses the value of the FromArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to specify a particular \"From\" address in the header of the raw email.

    Instead of using this parameter, you can use the X-header X-SES-FROM-ARN in the raw message of the email. If you use both the FromArn parameter and the corresponding X-header, Amazon SES uses the value of the FromArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide.

    " }, "SourceArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN in the raw message of the email. If you use both the SourceArn parameter and the corresponding X-header, Amazon SES uses the value of the SourceArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN in the raw message of the email. If you use both the SourceArn parameter and the corresponding X-header, Amazon SES uses the value of the SourceArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide.

    " }, "ReturnPathArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    Instead of using this parameter, you can use the X-header X-SES-RETURN-PATH-ARN in the raw message of the email. If you use both the ReturnPathArn parameter and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    Instead of using this parameter, you can use the X-header X-SES-RETURN-PATH-ARN in the raw message of the email. If you use both the ReturnPathArn parameter and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn parameter.

    For information about when to use this parameter, see the description of SendRawEmail in this guide, or see the Amazon SES Developer Guide.

    " }, "Tags":{ "shape":"MessageTagList", @@ -3796,7 +3796,7 @@ "documentation":"

    The name of the configuration set to use when you send an email using SendRawEmail.

    " } }, - "documentation":"

    Represents a request to send a single raw email using Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to send a single raw email using Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "SendRawEmailResponse":{ "type":"structure", @@ -3820,7 +3820,7 @@ "members":{ "Source":{ "shape":"Address", - "documentation":"

    The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide.

    If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a source email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492. The sender name (also known as the friendly name) may contain non-ASCII characters. These characters must be encoded using MIME encoded-word syntax, as described inRFC 2047. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=.

    " + "documentation":"

    The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide.

    If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the email address string must be 7-bit ASCII. If you want to send to or from email addresses that contain Unicode characters in the domain part of an address, you must encode the domain using Punycode. Punycode is not permitted in the local part of the email address (the part before the @ sign) nor in the \"friendly from\" name. If you want to use Unicode characters in the \"friendly from\" name, you must encode the \"friendly from\" name using MIME encoded-word syntax, as described in Sending raw email using the Amazon SES API. For more information about Punycode, see RFC 3492.

    " }, "Destination":{ "shape":"Destination", @@ -3828,19 +3828,19 @@ }, "ReplyToAddresses":{ "shape":"AddressList", - "documentation":"

    The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.

    " + "documentation":"

    The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address receives the reply.

    " }, "ReturnPath":{ "shape":"Address", - "documentation":"

    The email address that bounces and complaints will be forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.

    " + "documentation":"

    The email address that bounces and complaints are forwarded to when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message is returned from the recipient's ISP; this message is forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.

    " }, "SourceArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to send from user@example.com, then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the Source to be user@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " }, "ReturnPathArn":{ "shape":"AmazonResourceName", - "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " + "documentation":"

    This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.

    For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) attaches a policy to it that authorizes you to use feedback@example.com, then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, and the ReturnPath to be feedback@example.com.

    For more information about sending authorization, see the Amazon SES Developer Guide.

    " }, "Tags":{ "shape":"MessageTagList", @@ -3863,7 +3863,7 @@ "documentation":"

    A list of replacement values to apply to the template. This parameter is a JSON object, typically consisting of key-value pairs in which the keys correspond to replacement tags in the email template.

    " } }, - "documentation":"

    Represents a request to send a templated email using Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to send a templated email using Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "SendTemplatedEmailResponse":{ "type":"structure", @@ -3884,7 +3884,7 @@ "documentation":"

    The name of the receipt rule set to make active. Setting this value to null disables all email receiving.

    " } }, - "documentation":"

    Represents a request to set a receipt rule set as the active receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to set a receipt rule set as the active receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "SetActiveReceiptRuleSetResponse":{ "type":"structure", @@ -3908,7 +3908,7 @@ "documentation":"

    Sets whether DKIM signing is enabled for an identity. Set to true to enable DKIM signing for this identity; false to disable it.

    " } }, - "documentation":"

    Represents a request to enable or disable Amazon SES Easy DKIM signing for an identity. For more information about setting up Easy DKIM, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to enable or disable Amazon SES Easy DKIM signing for an identity. For more information about setting up Easy DKIM, see the Amazon SES Developer Guide.

    " }, "SetIdentityDkimEnabledResponse":{ "type":"structure", @@ -3929,10 +3929,10 @@ }, "ForwardingEnabled":{ "shape":"Enabled", - "documentation":"

    Sets whether Amazon SES will forward bounce and complaint notifications as email. true specifies that Amazon SES will forward bounce and complaint notifications as email, in addition to any Amazon SNS topic publishing otherwise specified. false specifies that Amazon SES will publish bounce and complaint notifications only through Amazon SNS. This value can only be set to false when Amazon SNS topics are set for both Bounce and Complaint notification types.

    " + "documentation":"

    Sets whether Amazon SES forwards bounce and complaint notifications as email. true specifies that Amazon SES forwards bounce and complaint notifications as email, in addition to any Amazon SNS topic publishing otherwise specified. false specifies that Amazon SES publishes bounce and complaint notifications only through Amazon SNS. This value can only be set to false when Amazon SNS topics are set for both Bounce and Complaint notification types.

    " } }, - "documentation":"

    Represents a request to enable or disable whether Amazon SES forwards you bounce and complaint notifications through email. For information about email feedback forwarding, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to enable or disable whether Amazon SES forwards you bounce and complaint notifications through email. For information about email feedback forwarding, see the Amazon SES Developer Guide.

    " }, "SetIdentityFeedbackForwardingEnabledResponse":{ "type":"structure", @@ -3958,10 +3958,10 @@ }, "Enabled":{ "shape":"Enabled", - "documentation":"

    Sets whether Amazon SES includes the original email headers in Amazon SNS notifications of the specified notification type. A value of true specifies that Amazon SES will include headers in notifications, and a value of false specifies that Amazon SES will not include headers in notifications.

    This value can only be set when NotificationType is already set to use a particular Amazon SNS topic.

    " + "documentation":"

    Sets whether Amazon SES includes the original email headers in Amazon SNS notifications of the specified notification type. A value of true specifies that Amazon SES includes headers in notifications, and a value of false specifies that Amazon SES does not include headers in notifications.

    This value can only be set when NotificationType is already set to use a particular Amazon SNS topic.

    " } }, - "documentation":"

    Represents a request to set whether Amazon SES includes the original email headers in the Amazon SNS notifications of a specified type. For information about notifications, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to set whether Amazon SES includes the original email headers in the Amazon SNS notifications of a specified type. For information about notifications, see the Amazon SES Developer Guide.

    " }, "SetIdentityHeadersInNotificationsEnabledResponse":{ "type":"structure", @@ -3975,18 +3975,18 @@ "members":{ "Identity":{ "shape":"Identity", - "documentation":"

    The verified identity for which you want to enable or disable the specified custom MAIL FROM domain.

    " + "documentation":"

    The verified identity.

    " }, "MailFromDomain":{ "shape":"MailFromDomainName", - "documentation":"

    The custom MAIL FROM domain that you want the verified identity to use. The MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not be used in a \"From\" address if the MAIL FROM domain is the destination of email feedback forwarding (for more information, see the Amazon SES Developer Guide), and 3) not be used to receive emails. A value of null disables the custom MAIL FROM setting for the identity.

    " + "documentation":"

    The custom MAIL FROM domain for the verified identity to use. The MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not be used in a \"From\" address if the MAIL FROM domain is the destination of email feedback forwarding (for more information, see the Amazon SES Developer Guide), and 3) not be used to receive emails. A value of null disables the custom MAIL FROM setting for the identity.

    " }, "BehaviorOnMXFailure":{ "shape":"BehaviorOnMXFailure", - "documentation":"

    The action that you want Amazon SES to take if it cannot successfully read the required MX record when you send an email. If you choose UseDefaultValue, Amazon SES will use amazonses.com (or a subdomain of that) as the MAIL FROM domain. If you choose RejectMessage, Amazon SES will return a MailFromDomainNotVerified error and not send the email.

    The action specified in BehaviorOnMXFailure is taken when the custom MAIL FROM domain setup is in the Pending, Failed, and TemporaryFailure states.

    " + "documentation":"

    The action for Amazon SES to take if it cannot successfully read the required MX record when you send an email. If you choose UseDefaultValue, Amazon SES uses amazonses.com (or a subdomain of that) as the MAIL FROM domain. If you choose RejectMessage, Amazon SES returns a MailFromDomainNotVerified error and does not send the email.

    The action specified in BehaviorOnMXFailure is taken when the custom MAIL FROM domain setup is in the Pending, Failed, and TemporaryFailure states.

    " } }, - "documentation":"

    Represents a request to enable or disable the Amazon SES custom MAIL FROM domain setup for a verified identity. For information about using a custom MAIL FROM domain, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to enable or disable the Amazon SES custom MAIL FROM domain setup for a verified identity. For information about using a custom MAIL FROM domain, see the Amazon SES Developer Guide.

    " }, "SetIdentityMailFromDomainResponse":{ "type":"structure", @@ -4003,18 +4003,18 @@ "members":{ "Identity":{ "shape":"Identity", - "documentation":"

    The identity (email address or domain) that you want to set the Amazon SNS topic for.

    You can only specify a verified identity for this parameter.

    You can specify an identity by using its name or by using its Amazon Resource Name (ARN). The following examples are all valid identities: sender@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    " + "documentation":"

    The identity (email address or domain) for the Amazon SNS topic.

    You can only specify a verified identity for this parameter.

    You can specify an identity by using its name or by using its Amazon Resource Name (ARN). The following examples are all valid identities: sender@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com.

    " }, "NotificationType":{ "shape":"NotificationType", - "documentation":"

    The type of notifications that will be published to the specified Amazon SNS topic.

    " + "documentation":"

    The type of notifications that are published to the specified Amazon SNS topic.

    " }, "SnsTopic":{ "shape":"NotificationTopic", "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic. If the parameter is omitted from the request or a null value is passed, SnsTopic is cleared and publishing is disabled.

    " } }, - "documentation":"

    Represents a request to specify the Amazon SNS topic to which Amazon SES will publish bounce, complaint, or delivery notifications for emails sent with that identity as the Source. For information about Amazon SES notifications, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to specify the Amazon SNS topic to which Amazon SES publishes bounce, complaint, or delivery notifications for emails sent with that identity as the source. For information about Amazon SES notifications, see the Amazon SES Developer Guide.

    " }, "SetIdentityNotificationTopicResponse":{ "type":"structure", @@ -4042,7 +4042,7 @@ "documentation":"

    The name of the receipt rule after which to place the specified receipt rule.

    " } }, - "documentation":"

    Represents a request to set the position of a receipt rule in a receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to set the position of a receipt rule in a receipt rule set. You use receipt rule sets to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "SetReceiptRulePositionResponse":{ "type":"structure", @@ -4060,10 +4060,10 @@ }, "TopicArn":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the stop action is taken. You can find the ARN of a topic by using the ListTopics Amazon SNS operation.

    For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " } }, - "documentation":"

    When included in a receipt rule, this action terminates the evaluation of the receipt rule set and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For information about setting a stop action in a receipt rule, see the Amazon SES Developer Guide.

    " + "documentation":"

    When included in a receipt rule, this action terminates the evaluation of the receipt rule set and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS).

    For information about setting a stop action in a receipt rule, see the Amazon SES Developer Guide.

    " }, "StopScope":{ "type":"string", @@ -4078,7 +4078,7 @@ "members":{ "TemplateName":{ "shape":"TemplateName", - "documentation":"

    The name of the template. You will refer to this name when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail operations.

    " + "documentation":"

    The name of the template. You use this name when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail operations.

    " }, "SubjectPart":{ "shape":"SubjectPart", @@ -4086,14 +4086,14 @@ }, "TextPart":{ "shape":"TextPart", - "documentation":"

    The email body that will be visible to recipients whose email clients do not display HTML.

    " + "documentation":"

    The email body that is visible to recipients whose email clients do not display HTML content.

    " }, "HtmlPart":{ "shape":"HtmlPart", "documentation":"

    The HTML body of the email.

    " } }, - "documentation":"

    The content of the email, composed of a subject line, an HTML part, and a text-only part.

    " + "documentation":"

    The content of the email, composed of a subject line and either an HTML part or a text-only part.

    " }, "TemplateContent":{"type":"string"}, "TemplateData":{ @@ -4141,7 +4141,7 @@ "members":{ "TemplateName":{ "shape":"TemplateName", - "documentation":"

    The name of the template that you want to render.

    " + "documentation":"

    The name of the template to render.

    " }, "TemplateData":{ "shape":"TemplateData", @@ -4172,10 +4172,10 @@ "members":{ "CustomRedirectDomain":{ "shape":"CustomRedirectDomain", - "documentation":"

    The custom subdomain that will be used to redirect email recipients to the Amazon SES event tracking domain.

    " + "documentation":"

    The custom subdomain that is used to redirect email recipients to the Amazon SES event tracking domain.

    " } }, - "documentation":"

    A domain that is used to redirect email recipients to an Amazon SES-operated domain. This domain captures open and click events generated by Amazon SES emails.

    For more information, see Configuring Custom Domains to Handle Open and Click Tracking in the Amazon SES Developer Guide.

    " + "documentation":"

    A domain that is used to redirect email recipients to an Amazon SES-operated domain. This domain captures open and click events generated by Amazon SES emails.

    For more information, see Configuring Custom Domains to Handle Open and Click Tracking in the Amazon SES Developer Guide.

    " }, "TrackingOptionsAlreadyExistsException":{ "type":"structure", @@ -4214,7 +4214,7 @@ "members":{ "Enabled":{ "shape":"Enabled", - "documentation":"

    Describes whether email sending is enabled or disabled for your Amazon SES account in the current AWS Region.

    " + "documentation":"

    Describes whether email sending is enabled or disabled for your Amazon SES account in the current Amazon Web Services Region.

    " } }, "documentation":"

    Represents a request to enable or disable the email sending capabilities for your entire Amazon SES account.

    " @@ -4228,14 +4228,14 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

    The name of the configuration set that contains the event destination that you want to update.

    " + "documentation":"

    The name of the configuration set that contains the event destination.

    " }, "EventDestination":{ "shape":"EventDestination", - "documentation":"

    The event destination object that you want to apply to the specified configuration set.

    " + "documentation":"

    The event destination object.

    " } }, - "documentation":"

    Represents a request to update the event destination of a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to update the event destination of a configuration set. Configuration sets enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide.

    " }, "UpdateConfigurationSetEventDestinationResponse":{ "type":"structure", @@ -4252,11 +4252,11 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

    The name of the configuration set that you want to update.

    " + "documentation":"

    The name of the configuration set to update.

    " }, "Enabled":{ "shape":"Enabled", - "documentation":"

    Describes whether or not Amazon SES will publish reputation metrics for the configuration set, such as bounce and complaint rates, to Amazon CloudWatch.

    " + "documentation":"

    Describes whether or not Amazon SES publishes reputation metrics for the configuration set, such as bounce and complaint rates, to Amazon CloudWatch.

    " } }, "documentation":"

    Represents a request to modify the reputation metric publishing settings for a configuration set.

    " @@ -4270,7 +4270,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

    The name of the configuration set that you want to update.

    " + "documentation":"

    The name of the configuration set to update.

    " }, "Enabled":{ "shape":"Enabled", @@ -4288,7 +4288,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

    The name of the configuration set for which you want to update the custom tracking domain.

    " + "documentation":"

    The name of the configuration set.

    " }, "TrackingOptions":{"shape":"TrackingOptions"} }, @@ -4306,7 +4306,7 @@ "members":{ "TemplateName":{ "shape":"TemplateName", - "documentation":"

    The name of the custom verification email template that you want to update.

    " + "documentation":"

    The name of the custom verification email template to update.

    " }, "FromEmailAddress":{ "shape":"FromAddress", @@ -4318,7 +4318,7 @@ }, "TemplateContent":{ "shape":"TemplateContent", - "documentation":"

    The content of the custom verification email. The total size of the email must be less than 10 MB. The message body may contain HTML, with some limitations. For more information, see Custom Verification Email Frequently Asked Questions in the Amazon SES Developer Guide.

    " + "documentation":"

    The content of the custom verification email. The total size of the email must be less than 10 MB. The message body may contain HTML, with some limitations. For more information, see Custom Verification Email Frequently Asked Questions in the Amazon SES Developer Guide.

    " }, "SuccessRedirectionURL":{ "shape":"SuccessRedirectionURL", @@ -4347,7 +4347,7 @@ "documentation":"

    A data structure that contains the updated receipt rule information.

    " } }, - "documentation":"

    Represents a request to update a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to update a receipt rule. You use receipt rules to receive email with Amazon SES. For more information, see the Amazon SES Developer Guide.

    " }, "UpdateReceiptRuleResponse":{ "type":"structure", @@ -4396,7 +4396,7 @@ "documentation":"

    The name of the domain to be verified for Easy DKIM signing.

    " } }, - "documentation":"

    Represents a request to generate the CNAME records needed to set up Easy DKIM with Amazon SES. For more information about setting up Easy DKIM, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to generate the CNAME records needed to set up Easy DKIM with Amazon SES. For more information about setting up Easy DKIM, see the Amazon SES Developer Guide.

    " }, "VerifyDomainDkimResponse":{ "type":"structure", @@ -4404,7 +4404,7 @@ "members":{ "DkimTokens":{ "shape":"VerificationTokenList", - "documentation":"

    A set of character strings that represent the domain's identity. If the identity is an email address, the tokens represent the domain of that address.

    Using these tokens, you need to create DNS CNAME records that point to DKIM public keys that are hosted by Amazon SES. Amazon Web Services eventually detects that you've updated your DNS records. This detection process might take up to 72 hours. After successful detection, Amazon SES is able to DKIM-sign email originating from that domain. (This only applies to domain identities, not email address identities.)

    For more information about creating DNS records using DKIM tokens, see the Amazon SES Developer Guide.

    " + "documentation":"

    A set of character strings that represent the domain's identity. If the identity is an email address, the tokens represent the domain of that address.

    Using these tokens, you need to create DNS CNAME records that point to DKIM public keys that are hosted by Amazon SES. Amazon Web Services eventually detects that you've updated your DNS records. This detection process might take up to 72 hours. After successful detection, Amazon SES is able to DKIM-sign email originating from that domain. (This only applies to domain identities, not email address identities.)

    For more information about creating DNS records using DKIM tokens, see the Amazon SES Developer Guide.

    " } }, "documentation":"

    Returns CNAME records that you must publish to the DNS server of your domain to set up Easy DKIM with Amazon SES.

    " @@ -4418,7 +4418,7 @@ "documentation":"

    The domain to be verified.

    " } }, - "documentation":"

    Represents a request to begin Amazon SES domain verification and to generate the TXT records that you must publish to the DNS server of your domain to complete the verification. For information about domain verification, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to begin Amazon SES domain verification and to generate the TXT records that you must publish to the DNS server of your domain to complete the verification. For information about domain verification, see the Amazon SES Developer Guide.

    " }, "VerifyDomainIdentityResponse":{ "type":"structure", @@ -4426,7 +4426,7 @@ "members":{ "VerificationToken":{ "shape":"VerificationToken", - "documentation":"

    A TXT record that you must place in the DNS settings of the domain to complete domain verification with Amazon SES.

    As Amazon SES searches for the TXT record, the domain's verification status is \"Pending\". When Amazon SES detects the record, the domain's verification status changes to \"Success\". If Amazon SES is unable to detect the record within 72 hours, the domain's verification status changes to \"Failed.\" In that case, if you still want to verify the domain, you must restart the verification process from the beginning.

    " + "documentation":"

    A TXT record that you must place in the DNS settings of the domain to complete domain verification with Amazon SES.

    As Amazon SES searches for the TXT record, the domain's verification status is \"Pending\". When Amazon SES detects the record, the domain's verification status changes to \"Success\". If Amazon SES is unable to detect the record within 72 hours, the domain's verification status changes to \"Failed.\" In that case, to verify the domain, you must restart the verification process from the beginning. The domain's verification status also changes to \"Success\" when it is DKIM verified.

    " } }, "documentation":"

    Returns a TXT record that you must publish to the DNS server of your domain to complete domain verification with Amazon SES.

    " @@ -4440,7 +4440,7 @@ "documentation":"

    The email address to be verified.

    " } }, - "documentation":"

    Represents a request to begin email address verification with Amazon SES. For information about email address verification, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to begin email address verification with Amazon SES. For information about email address verification, see the Amazon SES Developer Guide.

    " }, "VerifyEmailIdentityRequest":{ "type":"structure", @@ -4451,7 +4451,7 @@ "documentation":"

    The email address to be verified.

    " } }, - "documentation":"

    Represents a request to begin email address verification with Amazon SES. For information about email address verification, see the Amazon SES Developer Guide.

    " + "documentation":"

    Represents a request to begin email address verification with Amazon SES. For information about email address verification, see the Amazon SES Developer Guide.

    " }, "VerifyEmailIdentityResponse":{ "type":"structure", @@ -4465,15 +4465,15 @@ "members":{ "TopicArn":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the WorkMail action is called. You can find the ARN of a topic by using the ListTopics operation in Amazon SNS.

    For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    " }, "OrganizationArn":{ "shape":"AmazonResourceName", - "documentation":"

    The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7. For information about Amazon WorkMail organizations, see the Amazon WorkMail Administrator Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon WorkMail organization. Amazon WorkMail ARNs use the following format:

    arn:aws:workmail:<region>:<awsAccountId>:organization/<workmailOrganizationId>

    You can find the ID of your organization by using the ListOrganizations operation in Amazon WorkMail. Amazon WorkMail organization IDs begin with \"m-\", followed by a string of alphanumeric characters.

    For information about Amazon WorkMail organizations, see the Amazon WorkMail Administrator Guide.

    " } }, - "documentation":"

    When included in a receipt rule, this action calls Amazon WorkMail and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS). You will typically not use this action directly because Amazon WorkMail adds the rule automatically during its setup procedure.

    For information using a receipt rule to call Amazon WorkMail, see the Amazon SES Developer Guide.

    " + "documentation":"

    When included in a receipt rule, this action calls Amazon WorkMail and, optionally, publishes a notification to Amazon Simple Notification Service (Amazon SNS). It usually isn't necessary to set this up manually, because Amazon WorkMail adds the rule automatically during its setup procedure.

    For information using a receipt rule to call Amazon WorkMail, see the Amazon SES Developer Guide.

    " } }, - "documentation":"Amazon Simple Email Service

    This document contains reference information for the Amazon Simple Email Service (Amazon SES) API, version 2010-12-01. This document is best used in conjunction with the Amazon SES Developer Guide.

    For a list of Amazon SES endpoints to use in service requests, see Regions and Amazon SES in the Amazon SES Developer Guide.

    " + "documentation":"Amazon Simple Email Service

    This document contains reference information for the Amazon Simple Email Service (Amazon SES) API, version 2010-12-01. This document is best used in conjunction with the Amazon SES Developer Guide.

    For a list of Amazon SES endpoints to use in service requests, see Regions and Amazon SES in the Amazon SES Developer Guide.

    This documentation contains reference information related to the following:

    " } diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index 0cf612fd67e..e3ac772ae37 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 545bca31994..9d33659b91d 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/shield/pom.xml b/services/shield/pom.xml index d36942d2b9e..b7d677ac95a 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/signer/pom.xml b/services/signer/pom.xml index 54928abbe52..430b649e3d4 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/simspaceweaver/pom.xml b/services/simspaceweaver/pom.xml index 1918b65b5fe..9c1f25d696d 100644 --- a/services/simspaceweaver/pom.xml +++ b/services/simspaceweaver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT simspaceweaver AWS Java SDK :: Services :: Sim Space Weaver diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 84d5448c6f6..5ad2bc6b567 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index fab47a688ce..e0d471a344f 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/snowdevicemanagement/pom.xml b/services/snowdevicemanagement/pom.xml index 01bd0f446a6..ddb11fe120d 100644 --- a/services/snowdevicemanagement/pom.xml +++ b/services/snowdevicemanagement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT snowdevicemanagement AWS Java SDK :: Services :: Snow Device Management diff --git a/services/sns/pom.xml b/services/sns/pom.xml index 8e158c5d83c..747ff7e72f9 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index db014661292..421c160708d 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/sqs/src/main/resources/codegen-resources/service-2.json b/services/sqs/src/main/resources/codegen-resources/service-2.json index 59c24abf082..dc5dc00c1a0 100644 --- a/services/sqs/src/main/resources/codegen-resources/service-2.json +++ b/services/sqs/src/main/resources/codegen-resources/service-2.json @@ -39,7 +39,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"UnsupportedOperation"} ], - "documentation":"

    Cancels a specified message movement task.

    • A message movement can only be cancelled when the current status is RUNNING.

    • Cancelling a message movement task does not revert the messages that have already been moved. It can only stop the messages that have not been moved yet.

    " + "documentation":"

    Cancels a specified message movement task. A message movement can only be cancelled when the current status is RUNNING. Cancelling a message movement task does not revert the messages that have already been moved. It can only stop the messages that have not been moved yet.

    • This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.

    • Currently, only standard queues are supported.

    • Only one active message movement task is supported per queue at any given time.

    " }, "ChangeMessageVisibility":{ "name":"ChangeMessageVisibility", @@ -194,7 +194,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"UnsupportedOperation"} ], - "documentation":"

    Gets the most recent message movement tasks (up to 10) under a specific source queue.

    " + "documentation":"

    Gets the most recent message movement tasks (up to 10) under a specific source queue.

    • This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.

    • Currently, only standard queues are supported.

    • Only one active message movement task is supported per queue at any given time.

    " }, "ListQueueTags":{ "name":"ListQueueTags", @@ -233,7 +233,7 @@ {"shape":"QueueDoesNotExist"}, {"shape":"PurgeQueueInProgress"} ], - "documentation":"

    Deletes the messages in a queue specified by the QueueURL parameter.

    When you use the PurgeQueue action, you can't retrieve any messages deleted from a queue.

    The message deletion process takes up to 60 seconds. We recommend waiting for 60 seconds regardless of your queue's size.

    Messages sent to the queue before you call PurgeQueue might be received but are deleted within the next minute.

    Messages sent to the queue after you call PurgeQueue might be deleted while the queue is being purged.

    " + "documentation":"

    Deletes available messages in a queue (including in-flight messages) specified by the QueueURL parameter.

    When you use the PurgeQueue action, you can't retrieve any messages deleted from a queue.

    The message deletion process takes up to 60 seconds. We recommend waiting for 60 seconds regardless of your queue's size.

    Messages sent to the queue before you call PurgeQueue might be received but are deleted within the next minute.

    Messages sent to the queue after you call PurgeQueue might be deleted while the queue is being purged.

    " }, "ReceiveMessage":{ "name":"ReceiveMessage", @@ -325,7 +325,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"UnsupportedOperation"} ], - "documentation":"

    Starts an asynchronous task to move messages from a specified source queue to a specified destination queue.

    • This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.

    • Currently, only standard queues are supported.

    • Only one active message movement task is supported per queue at any given time.

    " + "documentation":"

    Starts an asynchronous task to move messages from a specified source queue to a specified destination queue.

    • This action is currently limited to supporting message redrive from queues that are configured as dead-letter queues (DLQs) of other Amazon SQS queues only. Non-SQS queue sources of dead-letter queues, such as Lambda or Amazon SNS topics, are currently not supported.

    • In dead-letter queues redrive context, the StartMessageMoveTask the source queue is the DLQ, while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.

    • Currently, only standard queues support redrive. FIFO queues don't support redrive.

    • Only one active message movement task is supported per queue at any given time.

    " }, "TagQueue":{ "name":"TagQueue", @@ -1596,7 +1596,7 @@ "members":{ "SourceArn":{ "shape":"String", - "documentation":"

    The ARN of the queue that contains the messages to be moved to another queue. Currently, only dead-letter queue (DLQ) ARNs are accepted.

    " + "documentation":"

    The ARN of the queue that contains the messages to be moved to another queue. Currently, only ARNs of dead-letter queues (DLQs) whose sources are other Amazon SQS queues are accepted. DLQs whose sources are non-SQS queues, such as Lambda or Amazon SNS topics, are not currently supported.

    " }, "DestinationArn":{ "shape":"String", diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 5c82a52dd28..7fdc973c098 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssmcontacts/pom.xml b/services/ssmcontacts/pom.xml index 87e1050cb44..08b55671009 100644 --- a/services/ssmcontacts/pom.xml +++ b/services/ssmcontacts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ssmcontacts AWS Java SDK :: Services :: SSM Contacts diff --git a/services/ssmincidents/pom.xml b/services/ssmincidents/pom.xml index d2bdbacc0cf..c882863dda2 100644 --- a/services/ssmincidents/pom.xml +++ b/services/ssmincidents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ssmincidents AWS Java SDK :: Services :: SSM Incidents diff --git a/services/ssmsap/pom.xml b/services/ssmsap/pom.xml index 320e27f95cb..217ba5160c8 100644 --- a/services/ssmsap/pom.xml +++ b/services/ssmsap/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ssmsap AWS Java SDK :: Services :: Ssm Sap diff --git a/services/sso/pom.xml b/services/sso/pom.xml index 28bcc89a62b..2d17058e814 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sso AWS Java SDK :: Services :: SSO diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index 3b010b44663..a105038abf5 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index b8adf748d4a..8b0e2a424e4 100644 --- 
a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index c945ce515e6..1637090db50 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/sts/pom.xml b/services/sts/pom.xml index 1d8b70c0a3c..d3033d5f4ef 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/sts/src/main/resources/codegen-resources/endpoint-tests.json b/services/sts/src/main/resources/codegen-resources/endpoint-tests.json index b566f4aac4a..5e12a28e224 100644 --- a/services/sts/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/sts/src/main/resources/codegen-resources/endpoint-tests.json @@ -702,9 +702,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -734,9 +734,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -766,9 +766,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -798,9 +798,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -830,9 +830,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", 
"signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -862,9 +862,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -894,9 +894,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -926,9 +926,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -958,9 +958,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -990,9 +990,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1022,9 +1022,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1054,9 +1054,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1086,9 +1086,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1118,9 +1118,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1150,9 +1150,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1182,9 +1182,9 @@ "properties": { "authSchemes": 
[ { - "signingRegion": "us-east-1", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-1" } ] }, @@ -1214,9 +1214,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-3", "signingName": "sts", - "name": "sigv4" + "name": "sigv4", + "signingRegion": "us-east-3" } ] }, diff --git a/services/sts/src/main/resources/codegen-resources/service-2.json b/services/sts/src/main/resources/codegen-resources/service-2.json index cb44d617c96..42f00675870 100644 --- a/services/sts/src/main/resources/codegen-resources/service-2.json +++ b/services/sts/src/main/resources/codegen-resources/service-2.json @@ -203,6 +203,10 @@ "SourceIdentity":{ "shape":"sourceIdentityType", "documentation":"

    The source identity specified by the principal that is calling the AssumeRole operation.

    You can require users to specify a source identity when they assume a role. You do this by using the sts:SourceIdentity condition key in a role trust policy. You can use source identity information in CloudTrail logs to determine who took actions with a role. You can use the aws:SourceIdentity condition key to further control access to Amazon Web Services resources based on the value of source identity. For more information about using source identity, see Monitor and control actions taken with assumed roles in the IAM User Guide.

    The regex used to validate this parameter is a string of characters consisting of upper- and lower-case alphanumeric characters with no spaces. You can also include underscores or any of the following characters: =,.@-. You cannot use a value that begins with the text aws:. This prefix is reserved for Amazon Web Services internal use.

    " + }, + "ProvidedContexts":{ + "shape":"ProvidedContextsListType", + "documentation":"

    Reserved for future use.

    " } } }, @@ -322,7 +326,7 @@ }, "WebIdentityToken":{ "shape":"clientTokenType", - "documentation":"

    The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call.

    " + "documentation":"

    The OAuth 2.0 access token or OpenID Connect ID token that is provided by the identity provider. Your application must get this token by authenticating the user who is using your application with a web identity provider before the application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA algorithms (RS256) are supported.

    " }, "ProviderId":{ "shape":"urlType", @@ -677,6 +681,25 @@ }, "documentation":"

    A reference to the IAM managed policy that is passed as a session policy for a role session or a federated user session.

    " }, + "ProvidedContext":{ + "type":"structure", + "members":{ + "ProviderArn":{ + "shape":"arnType", + "documentation":"

    Reserved for future use.

    " + }, + "ContextAssertion":{ + "shape":"contextAssertionType", + "documentation":"

    Reserved for future use.

    " + } + }, + "documentation":"

    Reserved for future use.

    " + }, + "ProvidedContextsListType":{ + "type":"list", + "member":{"shape":"ProvidedContext"}, + "max":5 + }, "RegionDisabledException":{ "type":"structure", "members":{ @@ -745,6 +768,11 @@ "min":4, "sensitive":true }, + "contextAssertionType":{ + "type":"string", + "max":2048, + "min":4 + }, "dateType":{"type":"timestamp"}, "decodedMessageType":{"type":"string"}, "durationSecondsType":{ diff --git a/services/support/pom.xml b/services/support/pom.xml index ff6cd664ca1..cc15ff17b2a 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/supportapp/pom.xml b/services/supportapp/pom.xml index 453333d0ec4..b44ea2751b9 100644 --- a/services/supportapp/pom.xml +++ b/services/supportapp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT supportapp AWS Java SDK :: Services :: Support App diff --git a/services/swf/pom.xml b/services/swf/pom.xml index ca8951e0e34..638d405a711 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/swf/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/swf/src/main/resources/codegen-resources/endpoint-rule-set.json index 64949755fc8..c25159f6f59 100644 --- a/services/swf/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/swf/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", 
- "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://swf-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + 
"ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://swf-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://swf.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://swf-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://swf.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://swf-fips.{Region}.{PartitionResult#dnsSuffix}", 
+ "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://swf.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://swf.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://swf.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://swf.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git 
a/services/swf/src/main/resources/codegen-resources/service-2.json b/services/swf/src/main/resources/codegen-resources/service-2.json index 349e3f1c605..afbf005dc65 100644 --- a/services/swf/src/main/resources/codegen-resources/service-2.json +++ b/services/swf/src/main/resources/codegen-resources/service-2.json @@ -1544,6 +1544,11 @@ "startedEventId":{ "shape":"EventId", "documentation":"

    The ID of the DecisionTaskStarted event recorded when this decision task was started. This information can be useful for diagnosing problems by tracing back the chain of events leading up to this event.

    " + }, + "taskList":{"shape":"TaskList"}, + "taskListScheduleToStartTimeout":{ + "shape":"DurationInSecondsOptional", + "documentation":"

    The maximum amount of time the decision task can wait to be assigned to a worker.

    " } }, "documentation":"

    Provides the details of the DecisionTaskCompleted event.

    " @@ -1563,6 +1568,10 @@ "startToCloseTimeout":{ "shape":"DurationInSecondsOptional", "documentation":"

    The maximum duration for this decision task. The task is considered timed out if it doesn't complete within this duration.

    The duration is specified in seconds, an integer greater than or equal to 0. You can use NONE to specify unlimited duration.

    " + }, + "scheduleToStartTimeout":{ + "shape":"DurationInSecondsOptional", + "documentation":"

    The maximum amount of time the decision task can wait to be assigned to a worker.

    " } }, "documentation":"

    Provides details about the DecisionTaskScheduled event.

    " @@ -1607,7 +1616,10 @@ }, "DecisionTaskTimeoutType":{ "type":"string", - "enum":["START_TO_CLOSE"] + "enum":[ + "START_TO_CLOSE", + "SCHEDULE_TO_START" + ] }, "DecisionType":{ "type":"string", @@ -3218,6 +3230,14 @@ "executionContext":{ "shape":"Data", "documentation":"

    User defined context to add to workflow execution.

    " + }, + "taskList":{ + "shape":"TaskList", + "documentation":"

    The task list to use for the future decision tasks of this workflow execution. This list overrides the original task list you specified while starting the workflow execution.

    " + }, + "taskListScheduleToStartTimeout":{ + "shape":"DurationInSecondsOptional", + "documentation":"

    Specifies a timeout (in seconds) for the task list override. When this parameter is missing, the task list override is permanent. This parameter makes it possible to temporarily override the task list. If a decision task scheduled on the override task list is not started within the timeout, the decision task will time out. Amazon SWF will revert the override and schedule a new decision task to the original task list.

    If a decision task scheduled on the override task list is started within the timeout, but not completed within the start-to-close timeout, Amazon SWF will also revert the override and schedule a new decision task to the original task list.

    " } }, "documentation":"

    Input data for a TaskCompleted response to a decision task.

    " diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index afafe97b717..e9655bd603d 100644 --- a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/textract/pom.xml b/services/textract/pom.xml index c0e5956c98a..9b344e3ead8 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index 88883f0f00f..4f5393b4727 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 59b8d58ba2e..0f2f8db29e9 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/tnb/pom.xml b/services/tnb/pom.xml index 661ca940f73..f46cae20bdd 100644 --- a/services/tnb/pom.xml +++ b/services/tnb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT tnb AWS Java SDK :: Services :: Tnb diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 6514bd95fba..0386e1badf5 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribestreaming/pom.xml 
b/services/transcribestreaming/pom.xml index edf20986b7b..c2713f0a019 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index a372460601e..f3e40f88268 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/transfer/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/transfer/src/main/resources/codegen-resources/endpoint-rule-set.json index 6f1477c015b..2694e990a7d 100644 --- a/services/transfer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/transfer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": 
"aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://transfer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://transfer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" 
+ }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://transfer-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://transfer-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://transfer.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": 
"DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://transfer.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://transfer.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://transfer.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/transfer/src/main/resources/codegen-resources/service-2.json b/services/transfer/src/main/resources/codegen-resources/service-2.json index 6e21589ddef..673f1450cbd 100644 --- a/services/transfer/src/main/resources/codegen-resources/service-2.json +++ b/services/transfer/src/main/resources/codegen-resources/service-2.json @@ -65,7 +65,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Creates the connector, which captures the parameters for an outbound connection for the AS2 protocol. The connector is required for sending files to an externally hosted AS2 server. For more details about connectors, see Create AS2 connectors.

    " + "documentation":"

    Creates the connector, which captures the parameters for a connection for the AS2 or SFTP protocol. For AS2, the connector is required for sending files to an externally hosted AS2 server. For SFTP, the connector is required when sending files to an SFTP server or receiving files from an SFTP server. For more details about connectors, see Create AS2 connectors and Create SFTP connectors.

    You must specify exactly one configuration object: either for AS2 (As2Config) or SFTP (SftpConfig).

    " }, "CreateProfile":{ "name":"CreateProfile", @@ -196,7 +196,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Deletes the agreement that's specified in the provided ConnectorId.

    " + "documentation":"

    Deletes the connector that's specified in the provided ConnectorId.

    " }, "DeleteHostKey":{ "name":"DeleteHostKey", @@ -753,7 +753,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Begins an outbound file transfer to a remote AS2 server. You specify the ConnectorId and the file paths for where to send the files.

    " + "documentation":"

    Begins a file transfer between local Amazon Web Services storage and a remote AS2 or SFTP server.

    • For an AS2 connector, you specify the ConnectorId and one or more SendFilePaths to identify the files you want to transfer.

    • For an SFTP connector, the file transfer can be either outbound or inbound. In both cases, you specify the ConnectorId. Depending on the direction of the transfer, you also specify the following items:

      • If you are transferring files from a partner's SFTP server to Amazon Web Services storage, you specify one or more RetreiveFilePaths to identify the files you want to transfer, and a LocalDirectoryPath to specify the destination folder.

      • If you are transferring files to a partner's SFTP server from Amazon Web Services storage, you specify one or more SendFilePaths to identify the files you want to transfer, and a RemoteDirectoryPath to specify the destination folder.

    " }, "StartServer":{ "name":"StartServer", @@ -802,6 +802,22 @@ ], "documentation":"

    Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are users, servers, roles, and other entities.

    There is no response returned from this call.

    " }, + "TestConnection":{ + "name":"TestConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TestConnectionRequest"}, + "output":{"shape":"TestConnectionResponse"}, + "errors":[ + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalServiceError"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Tests whether your SFTP connector is set up successfully. We highly recommend that you call this operation to test your ability to transfer files between a Transfer Family server and a trading partner's SFTP server.

    " + }, "TestIdentityProvider":{ "name":"TestIdentityProvider", "http":{ @@ -1050,7 +1066,7 @@ "documentation":"

    Provides Basic authentication support to the AS2 Connectors API. To use Basic authentication, you must provide the name or Amazon Resource Name (ARN) of a secret in Secrets Manager.

    The default value for this parameter is null, which indicates that Basic authentication is not enabled for the connector.

    If the connector should use Basic authentication, the secret needs to be in the following format:

    { \"Username\": \"user-name\", \"Password\": \"user-password\" }

    Replace user-name and user-password with the credentials for the actual user that is being authenticated.

    Note the following:

    • You are storing these credentials in Secrets Manager, not passing them directly into this API.

    • If you are using the API, SDKs, or CloudFormation to configure your connector, then you must create the secret before you can enable Basic authentication. However, if you are using the Amazon Web Services management console, you can have the system create the secret for you.

    If you have previously enabled Basic authentication for a connector, you can disable it by using the UpdateConnector API call. For example, if you are using the CLI, you can run the following command to remove Basic authentication:

    update-connector --connector-id my-connector-id --as2-config 'BasicAuthSecretId=\"\"'

    " } }, - "documentation":"

    Contains the details for a connector object. The connector object is used for AS2 outbound processes, to connect the Transfer Family customer with the trading partner.

    " + "documentation":"

    Contains the details for an AS2 connector object. The connector object is used for AS2 outbound processes, to connect the Transfer Family customer with the trading partner.

    " }, "As2ConnectorSecretId":{ "type":"string", @@ -1268,7 +1284,7 @@ }, "AccessRole":{ "shape":"Role", - "documentation":"

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    " + "documentation":"

    Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.

    For AS2 connectors

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    For SFTP connectors

    Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.

    " }, "Status":{ "shape":"AgreementStatusType", @@ -1294,21 +1310,20 @@ "type":"structure", "required":[ "Url", - "As2Config", "AccessRole" ], "members":{ "Url":{ "shape":"Url", - "documentation":"

    The URL of the partner's AS2 endpoint.

    " + "documentation":"

    The URL of the partner's AS2 or SFTP endpoint.

    " }, "As2Config":{ "shape":"As2ConnectorConfig", - "documentation":"

    A structure that contains the parameters for a connector object.

    " + "documentation":"

    A structure that contains the parameters for an AS2 connector object.

    " }, "AccessRole":{ "shape":"Role", - "documentation":"

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    " + "documentation":"

    Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.

    For AS2 connectors

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    For SFTP connectors

    Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.

    " }, "LoggingRole":{ "shape":"Role", @@ -1317,6 +1332,10 @@ "Tags":{ "shape":"Tags", "documentation":"

    Key-value pairs that can be used to group and search for connectors. Tags are metadata attached to connectors for any purpose.

    " + }, + "SftpConfig":{ + "shape":"SftpConnectorConfig", + "documentation":"

    A structure that contains the parameters for an SFTP connector object.

    " } } }, @@ -1462,7 +1481,7 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

    Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.

    The following is an Entry and Target pair example.

    [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

    In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

    The following is an Entry and Target pair example for chroot.

    [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

    " + "documentation":"

    Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) role provides access to paths in Target. This value can be set only when HomeDirectoryType is set to LOGICAL.

    The following is an Entry and Target pair example.

    [ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

    In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the value the user should see for their home directory when they log in.

    The following is an Entry and Target pair example for chroot.

    [ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

    " }, "Policy":{ "shape":"Policy", @@ -2112,7 +2131,7 @@ }, "AccessRole":{ "shape":"Role", - "documentation":"

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    " + "documentation":"

    Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.

    For AS2 connectors

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    For SFTP connectors

    Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.

    " }, "Tags":{ "shape":"Tags", @@ -2198,15 +2217,15 @@ }, "Url":{ "shape":"Url", - "documentation":"

    The URL of the partner's AS2 endpoint.

    " + "documentation":"

    The URL of the partner's AS2 or SFTP endpoint.

    " }, "As2Config":{ "shape":"As2ConnectorConfig", - "documentation":"

    A structure that contains the parameters for a connector object.

    " + "documentation":"

    A structure that contains the parameters for an AS2 connector object.

    " }, "AccessRole":{ "shape":"Role", - "documentation":"

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    " + "documentation":"

    Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.

    For AS2 connectors

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    For SFTP connectors

    Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.

    " }, "LoggingRole":{ "shape":"Role", @@ -2215,6 +2234,10 @@ "Tags":{ "shape":"Tags", "documentation":"

    Key-value pairs that can be used to group and search for connectors.

    " + }, + "SftpConfig":{ + "shape":"SftpConnectorConfig", + "documentation":"

    A structure that contains the parameters for an SFTP connector object.

    " } }, "documentation":"

    Describes the parameters for the connector, as identified by the ConnectorId.

    " @@ -3510,7 +3533,7 @@ }, "Url":{ "shape":"Url", - "documentation":"

    The URL of the partner's AS2 endpoint.

    " + "documentation":"

    The URL of the partner's AS2 or SFTP endpoint.

    " } }, "documentation":"

    Returns details of the connector that is specified.

    " @@ -4035,6 +4058,11 @@ "max":16, "min":0 }, + "SecretId":{ + "type":"string", + "max":2048, + "min":1 + }, "SecurityGroupId":{ "type":"string", "max":20, @@ -4144,6 +4172,31 @@ "PUBLIC_KEY_AND_PASSWORD" ] }, + "SftpConnectorConfig":{ + "type":"structure", + "members":{ + "UserSecretId":{ + "shape":"SecretId", + "documentation":"

    The identifiers for the secrets (in Amazon Web Services Secrets Manager) that contain the SFTP user's private keys or passwords.

    " + }, + "TrustedHostKeys":{ + "shape":"SftpConnectorTrustedHostKeyList", + "documentation":"

    The public portion of the host key, or keys, that are used to authenticate the user to the external server to which you are connecting. You can use the ssh-keyscan command against the SFTP server to retrieve the necessary key.

    The three standard SSH public key format elements are <key type>, <body base64>, and an optional <comment>, with spaces between each element.

    For the trusted host key, Transfer Family accepts RSA and ECDSA keys.

    • For RSA keys, the key type is ssh-rsa.

    • For ECDSA keys, the key type is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the size of the key you generated.

    " + } + }, + "documentation":"

    Contains the details for an SFTP connector object. The connector object is used for transferring files to and from a partner's SFTP server.

    " + }, + "SftpConnectorTrustedHostKey":{ + "type":"string", + "max":2048, + "min":1 + }, + "SftpConnectorTrustedHostKeyList":{ + "type":"list", + "member":{"shape":"SftpConnectorTrustedHostKey"}, + "max":10, + "min":1 + }, "SigningAlg":{ "type":"string", "enum":[ @@ -4205,18 +4258,27 @@ }, "StartFileTransferRequest":{ "type":"structure", - "required":[ - "ConnectorId", - "SendFilePaths" - ], + "required":["ConnectorId"], "members":{ "ConnectorId":{ "shape":"ConnectorId", - "documentation":"

    The unique identifier for the connector.

    " + "documentation":"

    The unique identifier for the connector.

    " }, "SendFilePaths":{ "shape":"FilePaths", - "documentation":"

    An array of strings. Each string represents the absolute path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt .

    " + "documentation":"

    One or more source paths for the Transfer Family server. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt .

    Replace DOC-EXAMPLE-BUCKET with one of your actual buckets.

    " + }, + "RetrieveFilePaths":{ + "shape":"FilePaths", + "documentation":"

    One or more source paths for the partner's SFTP server. Each string represents a source file path for one inbound file transfer.

    " + }, + "LocalDirectoryPath":{ + "shape":"FilePath", + "documentation":"

    For an inbound transfer, the LocalDirectoryPath specifies the destination for one or more files that are transferred from the partner's SFTP server.

    " + }, + "RemoteDirectoryPath":{ + "shape":"FilePath", + "documentation":"

    For an outbound transfer, the RemoteDirectoryPath specifies the destination for one or more files that are transferred to the partner's SFTP server. If you don't specify a RemoteDirectoryPath, the destination for transferred files is the SFTP user's home directory.

    " } } }, @@ -4226,7 +4288,7 @@ "members":{ "TransferId":{ "shape":"TransferId", - "documentation":"

    Returns the unique identifier for this file transfer.

    " + "documentation":"

    Returns the unique identifier for the file transfer.

    " } } }, @@ -4252,6 +4314,7 @@ "STOP_FAILED" ] }, + "Status":{"type":"string"}, "StatusCode":{"type":"integer"}, "StepResultOutputsJson":{ "type":"string", @@ -4351,6 +4414,33 @@ "max":50, "min":1 }, + "TestConnectionRequest":{ + "type":"structure", + "required":["ConnectorId"], + "members":{ + "ConnectorId":{ + "shape":"ConnectorId", + "documentation":"

    The unique identifier for the connector.

    " + } + } + }, + "TestConnectionResponse":{ + "type":"structure", + "members":{ + "ConnectorId":{ + "shape":"ConnectorId", + "documentation":"

    Returns the identifier of the connector object that you are testing.

    " + }, + "Status":{ + "shape":"Status", + "documentation":"

    Returns OK for successful test, or ERROR if the test fails.

    " + }, + "StatusMessage":{ + "shape":"Message", + "documentation":"

    Returns Connection succeeded if the test is successful. Or, returns a descriptive error message if the test fails. The following list provides the details for some error messages and troubleshooting steps for each.

    • Unable to access secrets manager: Verify that your secret name aligns with the one in Transfer Role permissions.

    • Unknown Host/Connection failed: Verify the server URL in the connector configuration, and verify that the login credentials work successfully outside of the connector.

    • Private key not found: Verify that the secret exists and is formatted correctly.

    • Invalid trusted host keys: Verify that the trusted host key in the connector configuration matches the ssh-keyscan output.

    " + } + } + }, "TestIdentityProviderRequest":{ "type":"structure", "required":[ @@ -4536,7 +4626,7 @@ }, "AccessRole":{ "shape":"Role", - "documentation":"

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    " + "documentation":"

    Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.

    For AS2 connectors

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    For SFTP connectors

    Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.

    " } } }, @@ -4592,19 +4682,23 @@ }, "Url":{ "shape":"Url", - "documentation":"

    The URL of the partner's AS2 endpoint.

    " + "documentation":"

    The URL of the partner's AS2 or SFTP endpoint.

    " }, "As2Config":{ "shape":"As2ConnectorConfig", - "documentation":"

    A structure that contains the parameters for a connector object.

    " + "documentation":"

    A structure that contains the parameters for an AS2 connector object.

    " }, "AccessRole":{ "shape":"Role", - "documentation":"

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    " + "documentation":"

    Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.

    For AS2 connectors

    With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

    If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

    For SFTP connectors

    Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.

    " }, "LoggingRole":{ "shape":"Role", "documentation":"

    The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs.

    " + }, + "SftpConfig":{ + "shape":"SftpConnectorConfig", + "documentation":"

    A structure that contains the parameters for an SFTP connector object.

    " } } }, diff --git a/services/translate/pom.xml b/services/translate/pom.xml index effa404986b..60377c51d99 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 translate diff --git a/services/verifiedpermissions/pom.xml b/services/verifiedpermissions/pom.xml index 1866b43bea3..77f4b11c713 100644 --- a/services/verifiedpermissions/pom.xml +++ b/services/verifiedpermissions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT verifiedpermissions AWS Java SDK :: Services :: Verified Permissions diff --git a/services/voiceid/pom.xml b/services/voiceid/pom.xml index faec2a16ec9..630ab16447f 100644 --- a/services/voiceid/pom.xml +++ b/services/voiceid/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT voiceid AWS Java SDK :: Services :: Voice ID diff --git a/services/vpclattice/pom.xml b/services/vpclattice/pom.xml index 17d61f0f17b..83cae67e285 100644 --- a/services/vpclattice/pom.xml +++ b/services/vpclattice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT vpclattice AWS Java SDK :: Services :: VPC Lattice diff --git a/services/waf/pom.xml b/services/waf/pom.xml index d9fe93ee55e..f3c754d97cf 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index db8a2759b4a..6c5e75e39cc 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index 96383a6c7cc..b09e5fe991d 100644 --- 
a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/wisdom/pom.xml b/services/wisdom/pom.xml index 0bf0aa0d69f..efc100fde13 100644 --- a/services/wisdom/pom.xml +++ b/services/wisdom/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT wisdom AWS Java SDK :: Services :: Wisdom diff --git a/services/wisdom/src/main/resources/codegen-resources/endpoint-tests.json b/services/wisdom/src/main/resources/codegen-resources/endpoint-tests.json index 992d9927332..d3307f81d16 100644 --- a/services/wisdom/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/wisdom/src/main/resources/codegen-resources/endpoint-tests.json @@ -8,9 +8,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-northeast-1", "UseFIPS": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-central-1", "UseFIPS": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "eu-west-2", "UseFIPS": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-west-2", "UseFIPS": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, 
{ @@ -99,9 +99,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -125,9 +125,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -138,9 +138,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -164,9 +164,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -190,9 +190,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -216,9 +216,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -229,9 +240,20 @@ } }, "params": { - "UseDualStack": false, + "Region": 
"us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -242,9 +264,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -255,9 +288,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -268,9 +312,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -281,9 +325,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -295,8 +339,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -306,9 +350,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": 
true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -318,11 +362,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/wisdom/src/main/resources/codegen-resources/service-2.json b/services/wisdom/src/main/resources/codegen-resources/service-2.json index c4c4b9ccfa6..1f3cd3e59dd 100644 --- a/services/wisdom/src/main/resources/codegen-resources/service-2.json +++ b/services/wisdom/src/main/resources/codegen-resources/service-2.json @@ -686,6 +686,10 @@ "shape":"Description", "documentation":"

    The description.

    " }, + "integrationConfiguration":{ + "shape":"AssistantIntegrationConfiguration", + "documentation":"

    The configuration information for the Wisdom assistant integration.

    " + }, "name":{ "shape":"Name", "documentation":"

    The name.

    " @@ -709,6 +713,16 @@ }, "documentation":"

    The assistant data.

    " }, + "AssistantIntegrationConfiguration":{ + "type":"structure", + "members":{ + "topicIntegrationArn":{ + "shape":"GenericArn", + "documentation":"

    The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for streaming chat messages.

    " + } + }, + "documentation":"

    The configuration information for the Wisdom assistant integration.

    " + }, "AssistantList":{ "type":"list", "member":{"shape":"AssistantSummary"} @@ -746,6 +760,10 @@ "shape":"Description", "documentation":"

    The description of the assistant.

    " }, + "integrationConfiguration":{ + "shape":"AssistantIntegrationConfiguration", + "documentation":"

    The configuration information for the Wisdom assistant integration.

    " + }, "name":{ "shape":"Name", "documentation":"

    The name of the assistant.

    " @@ -2400,6 +2418,10 @@ "shape":"Description", "documentation":"

    The description of the session.

    " }, + "integrationConfiguration":{ + "shape":"SessionIntegrationConfiguration", + "documentation":"

    The configuration information for the session integration.

    " + }, "name":{ "shape":"Name", "documentation":"

    The name of the session.

    " @@ -2419,6 +2441,16 @@ }, "documentation":"

    Information about the session.

    " }, + "SessionIntegrationConfiguration":{ + "type":"structure", + "members":{ + "topicIntegrationArn":{ + "shape":"GenericArn", + "documentation":"

    The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for streaming chat messages.

    " + } + }, + "documentation":"

    The configuration information for the session integration.

    " + }, "SessionSummaries":{ "type":"list", "member":{"shape":"SessionSummary"} diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index 03f7ab77747..62e4c30b262 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index 79503aad903..f319f11cca7 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT worklink AWS Java SDK :: Services :: WorkLink diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index f6b35b60ac7..fc5461e39b7 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 workmail diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index 48294759d39..0f70544170d 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index f783cef4cf7..8e75ee17f63 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/workspaces/src/main/resources/codegen-resources/service-2.json b/services/workspaces/src/main/resources/codegen-resources/service-2.json index 60518a9a905..3f36aa5bb45 100644 --- a/services/workspaces/src/main/resources/codegen-resources/service-2.json +++ 
b/services/workspaces/src/main/resources/codegen-resources/service-2.json @@ -4162,7 +4162,7 @@ }, "VolumeEncryptionKey":{ "shape":"VolumeEncryptionKey", - "documentation":"

    The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.

    " + "documentation":"

    The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.

    " }, "UserVolumeEncryptionEnabled":{ "shape":"BooleanObject", @@ -4621,7 +4621,7 @@ }, "VolumeEncryptionKey":{ "shape":"VolumeEncryptionKey", - "documentation":"

    The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.

    " + "documentation":"

    The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.

    " }, "UserVolumeEncryptionEnabled":{ "shape":"BooleanObject", diff --git a/services/workspacesweb/pom.xml b/services/workspacesweb/pom.xml index 397e2e0dc70..3f2c3fa83c7 100644 --- a/services/workspacesweb/pom.xml +++ b/services/workspacesweb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT workspacesweb AWS Java SDK :: Services :: Work Spaces Web diff --git a/services/xray/pom.xml b/services/xray/pom.xml index 3f414c207d7..ae9252a6bab 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/test/auth-tests/pom.xml b/test/auth-tests/pom.xml index 4e0d41d8b4e..68e47c72ae9 100644 --- a/test/auth-tests/pom.xml +++ b/test/auth-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 4337ddd6b43..04096bebb58 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index 47ab32c1a1a..abfc2e70578 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index 4e5e98576d7..2418d405222 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/pom.xml 
b/test/protocol-tests-core/pom.xml index 46308b50a28..9a2606614d5 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index 75a27061528..ee1b2a0cb04 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/region-testing/pom.xml b/test/region-testing/pom.xml index b0505cc4194..89182d40cbc 100644 --- a/test/region-testing/pom.xml +++ b/test/region-testing/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/ruleset-testing-core/pom.xml b/test/ruleset-testing-core/pom.xml index 3ceb0897403..f3716ce9e82 100644 --- a/test/ruleset-testing-core/pom.xml +++ b/test/ruleset-testing-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/s3-benchmarks/pom.xml b/test/s3-benchmarks/pom.xml index 5276c282c9a..366bfa81ff6 100644 --- a/test/s3-benchmarks/pom.xml +++ b/test/s3-benchmarks/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index dc0b666cb64..c2ad4ff9dc7 100644 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml index 9ee0eedf697..6bb7fa84e60 100644 --- a/test/sdk-native-image-test/pom.xml +++ b/test/sdk-native-image-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom 
software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index ce289cb2c03..b52f17756fd 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index f6376fa40d2..ec2f1e98237 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index 41dbee939a1..573198abefa 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 66f941ba306..254fa7d9daf 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/third-party/pom.xml b/third-party/pom.xml index 1a96e25b682..c2c6e2bb5c5 100644 --- a/third-party/pom.xml +++ b/third-party/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT third-party diff --git a/third-party/third-party-jackson-core/pom.xml b/third-party/third-party-jackson-core/pom.xml index 0fa1e0dba61..c5da43f6db3 100644 --- a/third-party/third-party-jackson-core/pom.xml +++ b/third-party/third-party-jackson-core/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/third-party/third-party-jackson-dataformat-cbor/pom.xml 
b/third-party/third-party-jackson-dataformat-cbor/pom.xml index 196734a6a29..facc935202e 100644 --- a/third-party/third-party-jackson-dataformat-cbor/pom.xml +++ b/third-party/third-party-jackson-dataformat-cbor/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/utils/pom.xml b/utils/pom.xml index a3e4269cbea..87783b1a474 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.109-SNAPSHOT + 2.20.126-SNAPSHOT 4.0.0 diff --git a/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java b/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java index 192ea7cead9..e8d9271f3b6 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/BinaryUtils.java @@ -133,12 +133,12 @@ public static ByteBuffer immutableCopyOf(ByteBuffer bb) { if (bb == null) { return null; } - int sourceBufferPosition = bb.position(); ByteBuffer readOnlyCopy = bb.asReadOnlyBuffer(); readOnlyCopy.rewind(); ByteBuffer cloned = ByteBuffer.allocate(readOnlyCopy.capacity()) .put(readOnlyCopy); - cloned.position(sourceBufferPosition); + cloned.position(bb.position()); + cloned.limit(bb.limit()); return cloned.asReadOnlyBuffer(); } diff --git a/utils/src/main/java/software/amazon/awssdk/utils/async/SimplePublisher.java b/utils/src/main/java/software/amazon/awssdk/utils/async/SimplePublisher.java index 15bba8a0aaf..11d029ee96c 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/async/SimplePublisher.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/async/SimplePublisher.java @@ -382,7 +382,7 @@ public void request(long n) { @Override public void cancel() { - log.trace(() -> "Received cancel()"); + log.trace(() -> "Received cancel() from " + subscriber); // Create exception here instead of in supplier to preserve a more-useful stack trace. 
highPriorityQueue.add(new CancelQueueEntry<>()); diff --git a/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java b/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java index 4e416ea9e3b..b1287e2990e 100644 --- a/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/BinaryUtilsTest.java @@ -226,6 +226,17 @@ public void testImmutableCopyOfByteBuffer() { assertArrayEquals(bytesInSourceAfterCopy, fromSource); } + @Test + public void immutableCopyOf_retainsOriginalLimit() { + ByteBuffer sourceBuffer = ByteBuffer.allocate(10); + byte[] bytes = {1, 2, 3, 4}; + sourceBuffer.put(bytes); + sourceBuffer.rewind(); + sourceBuffer.limit(bytes.length); + ByteBuffer copy = BinaryUtils.immutableCopyOf(sourceBuffer); + assertThat(copy.limit()).isEqualTo(sourceBuffer.limit()); + } + @Test public void testImmutableCopyOfByteBuffer_nullBuffer() { assertNull(BinaryUtils.immutableCopyOf(null)); From f01ada07765d3d57b687e087a5039c95a795e75a Mon Sep 17 00:00:00 2001 From: David Ho <70000000+davidh44@users.noreply.github.com> Date: Tue, 29 Aug 2023 16:55:18 -0700 Subject: [PATCH 16/17] Request compression async streaming (#4262) * Refactor to common class AwsChunkedInputStream * Sync streaming compression * Sync streaming compression functional tests * Sync streaming compression integ tests * Fix integ test * Async streaming compression * Address comments * Refactor ChunkBuffer class * Address comments * Address comments * Remove unused field * Handle demand in Subscriber * Address comments * Add back final modifier --- .../core/internal/async/ChunkBuffer.java | 56 +++-- .../async/CompressionAsyncRequestBody.java | 212 ++++++++++++++++++ .../pipeline/stages/CompressRequestStage.java | 10 +- .../awssdk/core/async/ChunkBufferTest.java | 118 ++++++++-- .../CompressionAsyncRequestBodyTckTest.java | 111 +++++++++ .../CompressionAsyncRequestBodyTest.java | 173 ++++++++++++++ 
.../MediaStoreDataIntegrationTestBase.java | 4 +- ...stCompressionStreamingIntegrationTest.java | 28 +-- ...ransferEncodingChunkedIntegrationTest.java | 5 +- .../services/AsyncRequestCompressionTest.java | 205 +++++++++++++++++ .../services/RequestCompressionTest.java | 65 +----- .../service/http/MockAsyncHttpClient.java | 62 ++++- 12 files changed, 933 insertions(+), 116 deletions(-) create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java create mode 100644 test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java index c171b078767..bdf84d549b8 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java @@ -21,14 +21,16 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.utils.Logger; -import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.SdkBuilder; /** - * Class that will buffer incoming BufferBytes of totalBytes length to chunks of bufferSize* + * Class that will buffer incoming BufferBytes to chunks of bufferSize. + * If totalBytes is not provided, i.e. 
content-length is unknown, {@link #getBufferedData()} should be used in the Subscriber's + * {@code onComplete()} to check for a final chunk that is smaller than the chunk size, and send if present. */ @SdkInternalApi public final class ChunkBuffer { @@ -36,30 +38,26 @@ public final class ChunkBuffer { private final AtomicLong transferredBytes; private final ByteBuffer currentBuffer; private final int chunkSize; - private final long totalBytes; + private final Long totalBytes; private ChunkBuffer(Long totalBytes, Integer bufferSize) { - Validate.notNull(totalBytes, "The totalBytes must not be null"); - int chunkSize = bufferSize != null ? bufferSize : DEFAULT_ASYNC_CHUNK_SIZE; this.chunkSize = chunkSize; this.currentBuffer = ByteBuffer.allocate(chunkSize); - this.totalBytes = totalBytes; this.transferredBytes = new AtomicLong(0); + this.totalBytes = totalBytes; } public static Builder builder() { return new DefaultBuilder(); } - /** * Split the input {@link ByteBuffer} into multiple smaller {@link ByteBuffer}s, each of which contains {@link #chunkSize} * worth of bytes. If the last chunk of the input ByteBuffer contains less than {@link #chunkSize} data, the last chunk will * be buffered. 
*/ public synchronized Iterable split(ByteBuffer inputByteBuffer) { - if (!inputByteBuffer.hasRemaining()) { return Collections.singletonList(inputByteBuffer); } @@ -71,7 +69,7 @@ public synchronized Iterable split(ByteBuffer inputByteBuffer) { fillCurrentBuffer(inputByteBuffer); if (isCurrentBufferFull()) { - addCurrentBufferToIterable(byteBuffers, chunkSize); + addCurrentBufferToIterable(byteBuffers); } } @@ -82,8 +80,7 @@ public synchronized Iterable split(ByteBuffer inputByteBuffer) { // If this is the last chunk, add data buffered to the iterable if (isLastChunk()) { - int remainingBytesInBuffer = currentBuffer.position(); - addCurrentBufferToIterable(byteBuffers, remainingBytesInBuffer); + addCurrentBufferToIterable(byteBuffers); } return byteBuffers; } @@ -111,19 +108,38 @@ private void splitRemainingInputByteBuffer(ByteBuffer inputByteBuffer, List getBufferedData() { + int remainingBytesInBuffer = currentBuffer.position(); + + if (remainingBytesInBuffer == 0) { + return Optional.empty(); + } + + ByteBuffer bufferedChunk = ByteBuffer.allocate(remainingBytesInBuffer); + currentBuffer.flip(); + bufferedChunk.put(currentBuffer); + bufferedChunk.flip(); + return Optional.of(bufferedChunk); + } + private boolean isLastChunk() { + if (totalBytes == null) { + return false; + } long remainingBytes = totalBytes - transferredBytes.get(); return remainingBytes != 0 && remainingBytes == currentBuffer.position(); } - private void addCurrentBufferToIterable(List byteBuffers, int capacity) { - ByteBuffer bufferedChunk = ByteBuffer.allocate(capacity); - currentBuffer.flip(); - bufferedChunk.put(currentBuffer); - bufferedChunk.flip(); - byteBuffers.add(bufferedChunk); - transferredBytes.addAndGet(bufferedChunk.remaining()); - currentBuffer.clear(); + private void addCurrentBufferToIterable(List byteBuffers) { + Optional bufferedChunk = getBufferedData(); + if (bufferedChunk.isPresent()) { + byteBuffers.add(bufferedChunk.get()); + 
transferredBytes.addAndGet(bufferedChunk.get().remaining()); + currentBuffer.clear(); + } } private void fillCurrentBuffer(ByteBuffer inputByteBuffer) { @@ -151,8 +167,6 @@ public interface Builder extends SdkBuilder { Builder bufferSize(int bufferSize); Builder totalBytes(long totalBytes); - - } private static final class DefaultBuilder implements Builder { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java new file mode 100644 index 00000000000..82da601f0ac --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBody.java @@ -0,0 +1,212 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import static software.amazon.awssdk.core.internal.io.AwsChunkedInputStream.DEFAULT_CHUNK_SIZE; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.async.DelegatingSubscriber; +import software.amazon.awssdk.utils.async.FlatteningSubscriber; +import software.amazon.awssdk.utils.builder.SdkBuilder; + +/** + * Wrapper class to wrap an AsyncRequestBody. + * This will chunk and compress the payload with the provided {@link Compressor}. + */ +@SdkInternalApi +public class CompressionAsyncRequestBody implements AsyncRequestBody { + + private final AsyncRequestBody wrapped; + private final Compressor compressor; + private final int chunkSize; + + private CompressionAsyncRequestBody(DefaultBuilder builder) { + this.wrapped = Validate.paramNotNull(builder.asyncRequestBody, "asyncRequestBody"); + this.compressor = Validate.paramNotNull(builder.compressor, "compressor"); + this.chunkSize = builder.chunkSize != null ? 
builder.chunkSize : DEFAULT_CHUNK_SIZE; + } + + @Override + public void subscribe(Subscriber s) { + Validate.notNull(s, "Subscription MUST NOT be null."); + + SdkPublisher> split = split(wrapped); + SdkPublisher flattening = flattening(split); + flattening.map(compressor::compress).subscribe(s); + } + + @Override + public Optional contentLength() { + return wrapped.contentLength(); + } + + @Override + public String contentType() { + return wrapped.contentType(); + } + + private SdkPublisher> split(SdkPublisher source) { + return subscriber -> source.subscribe(new SplittingSubscriber(subscriber, chunkSize)); + } + + private SdkPublisher flattening(SdkPublisher> source) { + return subscriber -> source.subscribe(new FlatteningSubscriber<>(subscriber)); + } + + /** + * @return Builder instance to construct a {@link CompressionAsyncRequestBody}. + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + public interface Builder extends SdkBuilder { + + /** + * Sets the AsyncRequestBody that will be wrapped. + * @param asyncRequestBody + * @return This builder for method chaining. + */ + Builder asyncRequestBody(AsyncRequestBody asyncRequestBody); + + /** + * Sets the compressor to compress the request. + * @param compressor + * @return This builder for method chaining. + */ + Builder compressor(Compressor compressor); + + /** + * Sets the chunk size. Default size is 128 * 1024. + * @param chunkSize + * @return This builder for method chaining. 
+ */ + Builder chunkSize(Integer chunkSize); + } + + private static final class DefaultBuilder implements Builder { + + private AsyncRequestBody asyncRequestBody; + private Compressor compressor; + private Integer chunkSize; + + @Override + public CompressionAsyncRequestBody build() { + return new CompressionAsyncRequestBody(this); + } + + @Override + public Builder asyncRequestBody(AsyncRequestBody asyncRequestBody) { + this.asyncRequestBody = asyncRequestBody; + return this; + } + + @Override + public Builder compressor(Compressor compressor) { + this.compressor = compressor; + return this; + } + + @Override + public Builder chunkSize(Integer chunkSize) { + this.chunkSize = chunkSize; + return this; + } + } + + private static final class SplittingSubscriber extends DelegatingSubscriber> { + private final ChunkBuffer chunkBuffer; + private final AtomicBoolean upstreamDone = new AtomicBoolean(false); + private final AtomicLong downstreamDemand = new AtomicLong(); + private final Object lock = new Object(); + private volatile boolean sentFinalChunk = false; + + protected SplittingSubscriber(Subscriber> subscriber, int chunkSize) { + super(subscriber); + this.chunkBuffer = ChunkBuffer.builder() + .bufferSize(chunkSize) + .build(); + } + + @Override + public void onSubscribe(Subscription s) { + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + if (n <= 0) { + throw new IllegalArgumentException("n > 0 required but it was " + n); + } + + downstreamDemand.getAndAdd(n); + + if (upstreamDone.get()) { + sendFinalChunk(); + } else { + s.request(n); + } + } + + @Override + public void cancel() { + s.cancel(); + } + }); + } + + @Override + public void onNext(ByteBuffer byteBuffer) { + downstreamDemand.decrementAndGet(); + Iterable buffers = chunkBuffer.split(byteBuffer); + subscriber.onNext(buffers); + } + + @Override + public void onComplete() { + upstreamDone.compareAndSet(false, true); + if (downstreamDemand.get() > 0) { + 
sendFinalChunk(); + } + } + + @Override + public void onError(Throwable t) { + upstreamDone.compareAndSet(false, true); + super.onError(t); + } + + private void sendFinalChunk() { + synchronized (lock) { + if (!sentFinalChunk) { + sentFinalChunk = true; + Optional byteBuffer = chunkBuffer.getBufferedData(); + byteBuffer.ifPresent(buffer -> subscriber.onNext(Collections.singletonList(buffer))); + subscriber.onComplete(); + } + } + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java index 1eadb88d32d..e002697c5f1 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/CompressRequestStage.java @@ -30,6 +30,7 @@ import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.async.CompressionAsyncRequestBody; import software.amazon.awssdk.core.internal.compression.Compressor; import software.amazon.awssdk.core.internal.compression.CompressorType; import software.amazon.awssdk.core.internal.http.HttpClientDependencies; @@ -63,7 +64,6 @@ public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, Requ Compressor compressor = resolveCompressorType(context.executionAttributes()); - // non-streaming if (!isStreaming(context)) { compressEntirePayload(input, compressor); updateContentEncodingHeader(input, compressor); @@ -76,12 +76,14 @@ public SdkHttpFullRequest.Builder execute(SdkHttpFullRequest.Builder input, Requ } if (context.requestProvider() == null) { - // sync streaming input.contentStreamProvider(new 
CompressionContentStreamProvider(input.contentStreamProvider(), compressor)); + } else { + context.requestProvider(CompressionAsyncRequestBody.builder() + .asyncRequestBody(context.requestProvider()) + .compressor(compressor) + .build()); } - // TODO : streaming - async - updateContentEncodingHeader(input, compressor); return input; } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java index a553a55a453..41250225664 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/ChunkBufferTest.java @@ -16,7 +16,6 @@ package software.amazon.awssdk.core.async; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -24,6 +23,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; +import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -41,19 +41,53 @@ class ChunkBufferTest { - @Test - void builderWithNoTotalSize() { - assertThatThrownBy(() -> ChunkBuffer.builder().build()).isInstanceOf(NullPointerException.class); + @ParameterizedTest + @ValueSource(ints = {1, 6, 10, 23, 25}) + void numberOfChunk_Not_MultipleOfTotalBytes_KnownLength(int totalBytes) { + int bufferSize = 5; + + String inputString = RandomStringUtils.randomAscii(totalBytes); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .totalBytes(inputString.getBytes(StandardCharsets.UTF_8).length) + .build(); + Iterable byteBuffers = + chunkBuffer.split(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); + + AtomicInteger index = new AtomicInteger(0); + int 
count = (int) Math.ceil(totalBytes / (double) bufferSize); + int remainder = totalBytes % bufferSize; + + byteBuffers.forEach(r -> { + int i = index.get(); + + try (ByteArrayInputStream inputStream = new ByteArrayInputStream(inputString.getBytes(StandardCharsets.UTF_8))) { + byte[] expected; + if (i == count - 1 && remainder != 0) { + expected = new byte[remainder]; + } else { + expected = new byte[bufferSize]; + } + inputStream.skip(i * bufferSize); + inputStream.read(expected); + byte[] actualBytes = BinaryUtils.copyBytesFrom(r); + assertThat(actualBytes).isEqualTo(expected); + index.incrementAndGet(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); } @ParameterizedTest @ValueSource(ints = {1, 6, 10, 23, 25}) - void numberOfChunk_Not_MultipleOfTotalBytes(int totalBytes) { + void numberOfChunk_Not_MultipleOfTotalBytes_UnknownLength(int totalBytes) { int bufferSize = 5; String inputString = RandomStringUtils.randomAscii(totalBytes); - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(inputString.getBytes(StandardCharsets.UTF_8).length).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .build(); Iterable byteBuffers = chunkBuffer.split(ByteBuffer.wrap(inputString.getBytes(StandardCharsets.UTF_8))); @@ -83,10 +117,12 @@ void numberOfChunk_Not_MultipleOfTotalBytes(int totalBytes) { } @Test - void zeroTotalBytesAsInput_returnsZeroByte() { + void zeroTotalBytesAsInput_returnsZeroByte_KnownLength() { byte[] zeroByte = new byte[0]; - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(5).totalBytes(zeroByte.length).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(5) + .totalBytes(zeroByte.length) + .build(); Iterable byteBuffers = chunkBuffer.split(ByteBuffer.wrap(zeroByte)); @@ -98,13 +134,30 @@ void zeroTotalBytesAsInput_returnsZeroByte() { } @Test - void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { + void 
zeroTotalBytesAsInput_returnsZeroByte_UnknownLength() { + byte[] zeroByte = new byte[0]; + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(5) + .build(); + Iterable byteBuffers = + chunkBuffer.split(ByteBuffer.wrap(zeroByte)); + + AtomicInteger iteratedCounts = new AtomicInteger(); + byteBuffers.forEach(r -> { + iteratedCounts.getAndIncrement(); + }); + assertThat(iteratedCounts.get()).isEqualTo(1); + } + @Test + void emptyAllocatedBytes_returnSameNumberOfEmptyBytes_knownLength() { int totalBytes = 17; int bufferSize = 5; ByteBuffer wrap = ByteBuffer.allocate(totalBytes); - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(wrap.remaining()).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .totalBytes(wrap.remaining()) + .build(); Iterable byteBuffers = chunkBuffer.split(wrap); @@ -121,6 +174,34 @@ void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { assertThat(iteratedCounts.get()).isEqualTo(4); } + @Test + void emptyAllocatedBytes_returnSameNumberOfEmptyBytes_unknownLength() { + int totalBytes = 17; + int bufferSize = 5; + ByteBuffer wrap = ByteBuffer.allocate(totalBytes); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .build(); + Iterable byteBuffers = + chunkBuffer.split(wrap); + + AtomicInteger iteratedCounts = new AtomicInteger(); + byteBuffers.forEach(r -> { + iteratedCounts.getAndIncrement(); + if (iteratedCounts.get() * bufferSize < totalBytes) { + // array of empty bytes + assertThat(BinaryUtils.copyBytesFrom(r)).isEqualTo(ByteBuffer.allocate(bufferSize).array()); + } else { + assertThat(BinaryUtils.copyBytesFrom(r)).isEqualTo(ByteBuffer.allocate(totalBytes % bufferSize).array()); + } + }); + assertThat(iteratedCounts.get()).isEqualTo(3); + + Optional lastBuffer = chunkBuffer.getBufferedData(); + assertThat(lastBuffer.isPresent()); + assertThat(lastBuffer.get().remaining()).isEqualTo(2); + } + /** * * Total bytes 11(ChunkSize) 3 
(threads) @@ -152,14 +233,16 @@ void emptyAllocatedBytes_returnSameNumberOfEmptyBytes() { * 111 is given as output since we consumed all the total bytes* */ @Test - void concurrentTreads_calling_bufferAndCreateChunks() throws ExecutionException, InterruptedException { + void concurrentTreads_calling_bufferAndCreateChunks_knownLength() throws ExecutionException, InterruptedException { int totalBytes = 17; int bufferSize = 5; int threads = 8; ByteBuffer wrap = ByteBuffer.allocate(totalBytes); - ChunkBuffer chunkBuffer = - ChunkBuffer.builder().bufferSize(bufferSize).totalBytes(wrap.remaining() * threads).build(); + ChunkBuffer chunkBuffer = ChunkBuffer.builder() + .bufferSize(bufferSize) + .totalBytes(wrap.remaining() * threads) + .build(); ExecutorService service = Executors.newFixedThreadPool(threads); @@ -198,7 +281,4 @@ void concurrentTreads_calling_bufferAndCreateChunks() throws ExecutionException, assertThat(remainderBytesBuffers.get()).isOne(); assertThat(otherSizeBuffers.get()).isZero(); } - } - - diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java new file mode 100644 index 00000000000..54c74e1e97e --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/async/CompressionAsyncRequestBodyTckTest.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.async; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import io.reactivex.Flowable; +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.nio.ByteBuffer; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Optional; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.tck.PublisherVerification; +import org.reactivestreams.tck.TestEnvironment; +import software.amazon.awssdk.core.internal.async.CompressionAsyncRequestBody; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; + +public class CompressionAsyncRequestBodyTckTest extends PublisherVerification { + + private static final FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); + private static final Path rootDir = fs.getRootDirectories().iterator().next(); + private static final int MAX_ELEMENTS = 1000; + private static final int CHUNK_SIZE = 128 * 1024; + private static final Compressor compressor = new GzipCompressor(); + + public CompressionAsyncRequestBodyTckTest() { + super(new TestEnvironment()); + } + + @Override + public long maxElementsFromPublisher() { + return MAX_ELEMENTS; + } + + @Override + public Publisher createPublisher(long n) { + return CompressionAsyncRequestBody.builder() + .asyncRequestBody(customAsyncRequestBodyFromFileWithoutContentLength(n)) + .compressor(compressor) + .build(); + } + + @Override + public Publisher createFailedPublisher() { + return null; + } + + private static AsyncRequestBody customAsyncRequestBodyFromFileWithoutContentLength(long nChunks) { + return new AsyncRequestBody() { + @Override + public 
Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromFile(fileOfNChunks(nChunks))).subscribe(s); + } + }; + } + + private static Path fileOfNChunks(long nChunks) { + String name = String.format("%d-chunks-file.dat", nChunks); + Path p = rootDir.resolve(name); + if (!Files.exists(p)) { + try (OutputStream os = Files.newOutputStream(p)) { + os.write(createCompressibleArrayOfNChunks(nChunks)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return p; + } + + private static byte[] createCompressibleArrayOfNChunks(long nChunks) { + int size = Math.toIntExact(nChunks * CHUNK_SIZE); + ByteBuffer data = ByteBuffer.allocate(size); + + byte[] a = new byte[size / 4]; + byte[] b = new byte[size / 4]; + Arrays.fill(a, (byte) 'a'); + Arrays.fill(b, (byte) 'b'); + + data.put(a); + data.put(b); + data.put(a); + data.put(b); + + return data.array(); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java new file mode 100644 index 00000000000..ffb15e282a1 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/CompressionAsyncRequestBodyTest.java @@ -0,0 +1,173 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.reactivex.Flowable; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.zip.GZIPInputStream; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.core.internal.util.Mimetype; +import software.amazon.awssdk.http.async.SimpleSubscriber; + +public final class CompressionAsyncRequestBodyTest { + private static final Compressor compressor = new GzipCompressor(); + + @ParameterizedTest + @ValueSource(ints = {80, 1000}) + public void hasCorrectContent(int bodySize) throws Exception { + String testString = createCompressibleStringOfGivenSize(bodySize); + byte[] testBytes = testString.getBytes(); + int chunkSize = 133; + AsyncRequestBody provider = CompressionAsyncRequestBody.builder() + .compressor(compressor) + .asyncRequestBody(customAsyncRequestBodyWithoutContentLength(testBytes)) + .chunkSize(chunkSize) + .build(); + + ByteBuffer byteBuffer = ByteBuffer.allocate(testString.length()); + CountDownLatch done = new CountDownLatch(1); + AtomicInteger pos = new AtomicInteger(); + + Subscriber subscriber = new SimpleSubscriber(buffer -> { + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + byteBuffer.put(bytes); + + // verify each chunk + byte[] chunkToVerify = new byte[chunkSize]; + 
System.arraycopy(testBytes, pos.get(), chunkToVerify, 0, chunkSize); + chunkToVerify = compressor.compress(chunkToVerify); + + assertThat(bytes).isEqualTo(chunkToVerify); + pos.addAndGet(chunkSize); + }) { + @Override + public void onError(Throwable t) { + super.onError(t); + done.countDown(); + } + + @Override + public void onComplete() { + super.onComplete(); + done.countDown(); + } + }; + + provider.subscribe(subscriber); + done.await(10, TimeUnit.SECONDS); + + byte[] retrieved = byteBuffer.array(); + byte[] uncompressed = decompress(retrieved); + assertThat(new String(uncompressed)).isEqualTo(testString); + } + + @Test + public void emptyBytesConstructor_hasEmptyContent() throws Exception { + AsyncRequestBody requestBody = CompressionAsyncRequestBody.builder() + .compressor(compressor) + .asyncRequestBody(AsyncRequestBody.empty()) + .build(); + + ByteBuffer byteBuffer = ByteBuffer.allocate(0); + CountDownLatch done = new CountDownLatch(1); + + Subscriber subscriber = new SimpleSubscriber(buffer -> { + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + byteBuffer.put(bytes); + }) { + @Override + public void onError(Throwable t) { + super.onError(t); + done.countDown(); + } + + @Override + public void onComplete() { + super.onComplete(); + done.countDown(); + } + }; + + requestBody.subscribe(subscriber); + done.await(10, TimeUnit.SECONDS); + assertThat(byteBuffer.array()).isEmpty(); + assertThat(byteBuffer.array()).isEqualTo(new byte[0]); + assertThat(requestBody.contentType()).isEqualTo(Mimetype.MIMETYPE_OCTET_STREAM); + } + + private static String createCompressibleStringOfGivenSize(int size) { + ByteBuffer data = ByteBuffer.allocate(size); + + byte[] a = new byte[size / 4]; + byte[] b = new byte[size / 4]; + Arrays.fill(a, (byte) 'a'); + Arrays.fill(b, (byte) 'b'); + + data.put(a); + data.put(b); + data.put(a); + data.put(b); + + return new String(data.array()); + } + + private static byte[] decompress(byte[] compressedData) throws IOException 
{ + ByteArrayInputStream bais = new ByteArrayInputStream(compressedData); + GZIPInputStream gzipInputStream = new GZIPInputStream(bais); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int bytesRead; + while ((bytesRead = gzipInputStream.read(buffer)) != -1) { + baos.write(buffer, 0, bytesRead); + } + gzipInputStream.close(); + byte[] decompressedData = baos.toByteArray(); + return decompressedData; + } + + private static AsyncRequestBody customAsyncRequestBodyWithoutContentLength(byte[] content) { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromBytes(content)) + .subscribe(s); + } + }; + } +} \ No newline at end of file diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java index 3a0e7006ef8..20688925bc8 100644 --- a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/MediaStoreDataIntegrationTestBase.java @@ -83,7 +83,7 @@ private static DescribeContainerResponse waitContainerToBeActive() { .orFailAfter(Duration.ofMinutes(3)); } - protected AsyncRequestBody customAsyncRequestBodyWithoutContentLength() { + protected AsyncRequestBody customAsyncRequestBodyWithoutContentLength(byte[] body) { return new AsyncRequestBody() { @Override public Optional contentLength() { @@ -92,7 +92,7 @@ public Optional contentLength() { @Override public void subscribe(Subscriber s) { - Flowable.fromPublisher(AsyncRequestBody.fromBytes("Random text".getBytes())) + Flowable.fromPublisher(AsyncRequestBody.fromBytes(body)) 
.subscribe(s); } }; diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java index 9530f2319b3..bb4a2a9bf0c 100644 --- a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/RequestCompressionStreamingIntegrationTest.java @@ -27,6 +27,7 @@ import software.amazon.awssdk.core.RequestCompressionConfiguration; import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.interceptor.Context; import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; @@ -83,7 +84,7 @@ public static void setup() { asyncClient = MediaStoreDataAsyncClient.builder() .endpointOverride(uri) - .credentialsProvider(getCredentialsProvider()) + .credentialsProvider(credentialsProvider) .httpClient(NettyNioAsyncHttpClient.create()) .overrideConfiguration(o -> o.addExecutionInterceptor(new CaptureTransferEncodingHeaderInterceptor()) .addExecutionInterceptor(new CaptureContentEncodingHeaderInterceptor()) @@ -108,11 +109,13 @@ public static void setup() { } @AfterAll - public static void tearDown() { + public static void tearDown() throws InterruptedException { syncClient.deleteObject(deleteObjectRequest); Waiter.run(() -> syncClient.describeObject(r -> r.path("/foo"))) .untilException(ObjectNotFoundException.class) .orFailAfter(Duration.ofMinutes(1)); + Thread.sleep(1000); + mediaStoreClient.deleteContainer(r -> r.containerName(CONTAINER_NAME)); } @AfterEach @@ -121,7 +124,7 @@ public void cleanUp() { } @Test - 
public void putObject_withRequestCompressionSyncStreaming_compressesPayloadAndSendsCorrectly() throws IOException { + public void putObject_withSyncStreamingRequestCompression_compressesPayloadAndSendsCorrectly() throws IOException { TestContentProvider provider = new TestContentProvider(UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8)); syncClient.putObject(putObjectRequest, RequestBody.fromContentProvider(provider, "binary/octet-stream")); @@ -129,29 +132,26 @@ public void putObject_withRequestCompressionSyncStreaming_compressesPayloadAndSe assertThat(CaptureContentEncodingHeaderInterceptor.isGzip).isTrue(); ResponseInputStream response = syncClient.getObject(getObjectRequest); - byte[] buffer = new byte[UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8).length]; + byte[] buffer = new byte[UNCOMPRESSED_BODY.getBytes().length]; response.read(buffer); String retrievedContent = new String(buffer); - assertThat(UNCOMPRESSED_BODY).isEqualTo(retrievedContent); + assertThat(retrievedContent).isEqualTo(UNCOMPRESSED_BODY); } - // TODO : uncomment once async streaming compression is implemented - /*@Test - public void nettyClientPutObject_withoutContentLength_sendsSuccessfully() throws IOException { - AsyncRequestBody asyncRequestBody = customAsyncRequestBodyWithoutContentLength(); + @Test + public void putObject_withAsyncStreamingRequestCompression_compressesPayloadAndSendsCorrectly() throws IOException { + AsyncRequestBody asyncRequestBody = customAsyncRequestBodyWithoutContentLength(UNCOMPRESSED_BODY.getBytes()); asyncClient.putObject(putObjectRequest, asyncRequestBody).join(); assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); assertThat(CaptureContentEncodingHeaderInterceptor.isGzip).isTrue(); - // verify stored content is correct ResponseInputStream response = syncClient.getObject(getObjectRequest); - byte[] buffer = new byte[UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8).length]; + byte[] buffer = new 
byte[UNCOMPRESSED_BODY.getBytes().length]; response.read(buffer); String retrievedContent = new String(buffer); - assertThat(UNCOMPRESSED_BODY).isEqualTo(retrievedContent); - assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); - }*/ + assertThat(retrievedContent).isEqualTo(UNCOMPRESSED_BODY); + } private static class CaptureContentEncodingHeaderInterceptor implements ExecutionInterceptor { public static boolean isGzip; diff --git a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java index 80fb67dc6fa..34522618f75 100644 --- a/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java +++ b/services/mediastoredata/src/it/java/software/amazon/awssdk/services/mediastoredata/TransferEncodingChunkedIntegrationTest.java @@ -76,11 +76,12 @@ public static void setup() { } @AfterAll - public static void tearDown() { + public static void tearDown() throws InterruptedException { syncClientWithApache.deleteObject(deleteObjectRequest); Waiter.run(() -> syncClientWithApache.describeObject(r -> r.path("/foo"))) .untilException(ObjectNotFoundException.class) .orFailAfter(Duration.ofMinutes(1)); + Thread.sleep(500); mediaStoreClient.deleteContainer(r -> r.containerName(CONTAINER_NAME)); } @@ -100,7 +101,7 @@ public void urlConnectionClientPutObject_withoutContentLength_sendsSuccessfully( @Test public void nettyClientPutObject_withoutContentLength_sendsSuccessfully() { - asyncClientWithNetty.putObject(putObjectRequest, customAsyncRequestBodyWithoutContentLength()).join(); + asyncClientWithNetty.putObject(putObjectRequest, customAsyncRequestBodyWithoutContentLength("TestBody".getBytes())).join(); assertThat(CaptureTransferEncodingHeaderInterceptor.isChunked).isTrue(); } } diff --git 
a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java new file mode 100644 index 00000000000..5a8f1f50dbc --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/AsyncRequestCompressionTest.java @@ -0,0 +1,205 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services; + +import static org.assertj.core.api.Assertions.assertThat; + +import io.reactivex.Flowable; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.Optional; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.core.internal.compression.Compressor; +import software.amazon.awssdk.core.internal.compression.GzipCompressor; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithRequestCompressionRequest; +import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithStreamingRequestCompressionRequest; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; + +public class AsyncRequestCompressionTest { + private static final String UNCOMPRESSED_BODY = + "RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest-RequestCompressionTest"; + private String compressedBody; + private int compressedLen; + private MockAsyncHttpClient mockAsyncHttpClient; + private ProtocolRestJsonAsyncClient asyncClient; + private Compressor compressor; + + @BeforeEach + public void setUp() { + mockAsyncHttpClient = new MockAsyncHttpClient(); + asyncClient = ProtocolRestJsonAsyncClient.builder() + 
.credentialsProvider(AnonymousCredentialsProvider.create()) + .region(Region.US_EAST_1) + .httpClient(mockAsyncHttpClient) + .build(); + compressor = new GzipCompressor(); + byte[] compressedBodyBytes = compressor.compress(UNCOMPRESSED_BODY.getBytes()); + compressedBody = new String(compressedBodyBytes); + compressedLen = compressedBodyBytes.length; + } + + @AfterEach + public void reset() { + mockAsyncHttpClient.reset(); + } + + @Test + public void asyncNonStreamingOperation_compressionEnabledThresholdOverridden_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.requestCompressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void asyncNonStreamingOperation_payloadSizeLessThanCompressionThreshold_doesNotCompress() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest 
loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(UNCOMPRESSED_BODY); + assertThat(loggedSize).isEqualTo(UNCOMPRESSED_BODY.length()); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding")).isEmpty(); + } + + @Test + public void asyncStreamingOperation_compressionEnabled_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + mockAsyncHttpClient.setAsyncRequestBodyLength(compressedBody.length()); + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + asyncClient.putOperationWithStreamingRequestCompression(request, customAsyncRequestBodyWithoutContentLength(), + AsyncResponseTransformer.toBytes()).join(); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + String loggedBody = new String(mockAsyncHttpClient.getStreamingPayload()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + @Test + public void asyncNonStreamingOperation_compressionEnabledThresholdOverriddenWithRetry_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + PutOperationWithRequestCompressionRequest request = + PutOperationWithRequestCompressionRequest.builder() + 
.body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) + .overrideConfiguration(o -> o.requestCompressionConfiguration( + c -> c.minimumCompressionThresholdInBytes(1))) + .build(); + + asyncClient.putOperationWithRequestCompression(request); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); + String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); + int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedSize).isEqualTo(compressedLen); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + } + + @Test + public void asyncStreamingOperation_compressionEnabledWithRetry_compressesCorrectly() { + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + + mockAsyncHttpClient.setAsyncRequestBodyLength(compressedBody.length()); + PutOperationWithStreamingRequestCompressionRequest request = + PutOperationWithStreamingRequestCompressionRequest.builder().build(); + asyncClient.putOperationWithStreamingRequestCompression(request, customAsyncRequestBodyWithoutContentLength(), + AsyncResponseTransformer.toBytes()).join(); + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + String loggedBody = new String(mockAsyncHttpClient.getStreamingPayload()); + + assertThat(loggedBody).isEqualTo(compressedBody); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedRequest.matchingHeaders("Content-Length")).isEmpty(); + assertThat(loggedRequest.firstMatchingHeader("Transfer-Encoding").get()).isEqualTo("chunked"); + } + + private HttpExecuteResponse mockResponse() { + return 
HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + } + + private HttpExecuteResponse mockErrorResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(500).build()) + .build(); + } + + protected AsyncRequestBody customAsyncRequestBodyWithoutContentLength() { + return new AsyncRequestBody() { + @Override + public Optional contentLength() { + return Optional.empty(); + } + + @Override + public void subscribe(Subscriber s) { + Flowable.fromPublisher(AsyncRequestBody.fromBytes(UNCOMPRESSED_BODY.getBytes())) + .subscribe(s); + } + }; + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java index 29664c5f53f..14cb0712503 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/RequestCompressionTest.java @@ -22,7 +22,6 @@ import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; -import java.nio.charset.StandardCharsets; import java.time.Duration; import java.util.ArrayList; import java.util.List; @@ -40,11 +39,9 @@ import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpResponse; import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithRequestCompressionRequest; import software.amazon.awssdk.services.protocolrestjson.model.PutOperationWithStreamingRequestCompressionRequest; -import 
software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; public class RequestCompressionTest { @@ -53,42 +50,33 @@ public class RequestCompressionTest { private String compressedBody; private int compressedLen; private MockSyncHttpClient mockHttpClient; - private MockAsyncHttpClient mockAsyncHttpClient; private ProtocolRestJsonClient syncClient; - private ProtocolRestJsonAsyncClient asyncClient; private Compressor compressor; private RequestBody requestBody; @BeforeEach public void setUp() { mockHttpClient = new MockSyncHttpClient(); - mockAsyncHttpClient = new MockAsyncHttpClient(); syncClient = ProtocolRestJsonClient.builder() .credentialsProvider(AnonymousCredentialsProvider.create()) .region(Region.US_EAST_1) .httpClient(mockHttpClient) .build(); - asyncClient = ProtocolRestJsonAsyncClient.builder() - .credentialsProvider(AnonymousCredentialsProvider.create()) - .region(Region.US_EAST_1) - .httpClient(mockAsyncHttpClient) - .build(); compressor = new GzipCompressor(); - byte[] compressedBodyBytes = compressor.compress(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)).asByteArray(); + byte[] compressedBodyBytes = compressor.compress(UNCOMPRESSED_BODY.getBytes()); compressedLen = compressedBodyBytes.length; compressedBody = new String(compressedBodyBytes); - TestContentProvider provider = new TestContentProvider(UNCOMPRESSED_BODY.getBytes(StandardCharsets.UTF_8)); + TestContentProvider provider = new TestContentProvider(UNCOMPRESSED_BODY.getBytes()); requestBody = RequestBody.fromContentProvider(provider, "binary/octet-stream"); } @AfterEach public void reset() { mockHttpClient.reset(); - mockAsyncHttpClient.reset(); } @Test - public void sync_nonStreaming_compression_compressesCorrectly() { + public void syncNonStreamingOperation_compressionEnabledThresholdOverridden_compressesCorrectly() { mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); 
PutOperationWithRequestCompressionRequest request = @@ -110,30 +98,25 @@ public void sync_nonStreaming_compression_compressesCorrectly() { } @Test - public void async_nonStreaming_compression_compressesCorrectly() { - mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); + public void syncNonStreamingOperation_payloadSizeLessThanCompressionThreshold_doesNotCompress() { + mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); PutOperationWithRequestCompressionRequest request = PutOperationWithRequestCompressionRequest.builder() .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) - .overrideConfiguration(o -> o.requestCompressionConfiguration( - c -> c.minimumCompressionThresholdInBytes(1))) .build(); + syncClient.putOperationWithRequestCompression(request); - asyncClient.putOperationWithRequestCompression(request); - - SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); - int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); - assertThat(loggedBody).isEqualTo(compressedBody); - assertThat(loggedSize).isEqualTo(compressedLen); - assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); + assertThat(loggedBody).isEqualTo(UNCOMPRESSED_BODY); + assertThat(loggedRequest.firstMatchingHeader("Content-encoding")).isEmpty(); } @Test - public void sync_streaming_compression_compressesCorrectly() { + public void syncStreamingOperation_compressionEnabled_compressesCorrectly() { mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); PutOperationWithStreamingRequestCompressionRequest request = @@ -151,7 +134,7 @@ public void 
sync_streaming_compression_compressesCorrectly() { } @Test - public void sync_nonStreaming_compression_withRetry_compressesCorrectly() { + public void syncNonStreamingOperation_compressionEnabledThresholdOverriddenWithRetry_compressesCorrectly() { mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); @@ -174,31 +157,7 @@ public void sync_nonStreaming_compression_withRetry_compressesCorrectly() { } @Test - public void async_nonStreaming_compression_withRetry_compressesCorrectly() { - mockAsyncHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); - mockAsyncHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); - - PutOperationWithRequestCompressionRequest request = - PutOperationWithRequestCompressionRequest.builder() - .body(SdkBytes.fromUtf8String(UNCOMPRESSED_BODY)) - .overrideConfiguration(o -> o.requestCompressionConfiguration( - c -> c.minimumCompressionThresholdInBytes(1))) - .build(); - - asyncClient.putOperationWithRequestCompression(request); - - SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); - InputStream loggedStream = loggedRequest.contentStreamProvider().get().newStream(); - String loggedBody = new String(SdkBytes.fromInputStream(loggedStream).asByteArray()); - int loggedSize = Integer.valueOf(loggedRequest.firstMatchingHeader("Content-Length").get()); - - assertThat(loggedBody).isEqualTo(compressedBody); - assertThat(loggedSize).isEqualTo(compressedLen); - assertThat(loggedRequest.firstMatchingHeader("Content-encoding").get()).isEqualTo("gzip"); - } - - @Test - public void sync_streaming_compression_withRetry_compressesCorrectly() { + public void syncStreamingOperation_compressionEnabledWithRetry_compressesCorrectly() { mockHttpClient.stubNextResponse(mockErrorResponse(), Duration.ofMillis(500)); mockHttpClient.stubNextResponse(mockResponse(), Duration.ofMillis(500)); diff 
--git a/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java b/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java index 8a3f62f7838..16a7732cb18 100644 --- a/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java +++ b/test/service-test-utils/src/main/java/software/amazon/awssdk/testutils/service/http/MockAsyncHttpClient.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Optional; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicInteger; @@ -50,6 +51,8 @@ public final class MockAsyncHttpClient implements SdkAsyncHttpClient, MockHttpCl private final List> responses = new LinkedList<>(); private final AtomicInteger responseIndex = new AtomicInteger(0); private final ExecutorService executor; + private int asyncRequestBodyLength = -1; + private byte[] streamingPayload; public MockAsyncHttpClient() { this.executor = Executors.newFixedThreadPool(3); @@ -66,6 +69,11 @@ public CompletableFuture execute(AsyncExecuteRequest request) { request.responseHandler().onHeaders(nextResponse.httpResponse()); CompletableFuture.runAsync(() -> request.responseHandler().onStream(new ResponsePublisher(content, index)), executor); + + if (asyncRequestBodyLength > 0) { + captureStreamingPayload(request.requestContentPublisher()); + } + return CompletableFuture.completedFuture(null); } @@ -122,7 +130,28 @@ public void stubResponses(HttpExecuteResponse... responses) { this.responseIndex.set(0); } - private class ResponsePublisher implements SdkHttpContentPublisher { + /** + * Enable capturing the streaming payload by setting the length of the AsyncRequestBody. 
+ */ + public void setAsyncRequestBodyLength(int asyncRequestBodyLength) { + this.asyncRequestBodyLength = asyncRequestBodyLength; + } + + private void captureStreamingPayload(SdkHttpContentPublisher publisher) { + ByteBuffer byteBuffer = ByteBuffer.allocate(asyncRequestBodyLength); + Subscriber subscriber = new CapturingSubscriber(byteBuffer); + publisher.subscribe(subscriber); + streamingPayload = byteBuffer.array(); + } + + /** + * Returns the streaming payload byte array, if the asyncRequestBodyLength was set correctly. Otherwise, returns null. + */ + public byte[] getStreamingPayload() { + return streamingPayload.clone(); + } + + private final class ResponsePublisher implements SdkHttpContentPublisher { private final byte[] content; private final int index; @@ -165,4 +194,35 @@ public void cancel() { }); } } + + private static class CapturingSubscriber implements Subscriber { + private ByteBuffer byteBuffer; + private CountDownLatch done = new CountDownLatch(1); + + CapturingSubscriber(ByteBuffer byteBuffer) { + this.byteBuffer = byteBuffer; + } + + @Override + public void onSubscribe(Subscription subscription) { + subscription.request(Long.MAX_VALUE); + } + + @Override + public void onNext(ByteBuffer buffer) { + byte[] bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + byteBuffer.put(bytes); + } + + @Override + public void onError(Throwable t) { + done.countDown(); + } + + @Override + public void onComplete() { + done.countDown(); + } + } } From f81aad8b4a1527a7267293cea3d2ebb4c51855d9 Mon Sep 17 00:00:00 2001 From: David Ho <70000000+davidh44@users.noreply.github.com> Date: Tue, 29 Aug 2023 17:16:42 -0700 Subject: [PATCH 17/17] Merge from master (#4369) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Amazon Elastic Kubernetes Service Update: Add multiple customer error code to handle customer caused failure when managing EKS node groups * Release 2.20.113. Updated CHANGELOG.md, README.md and all pom.xml. 
* Update to next snapshot version: 2.20.114-SNAPSHOT * Cleanup unused imports/members (#4234) * Amazon Simple Queue Service Update: Documentation changes related to SQS APIs. * Release 2.20.114. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.115-SNAPSHOT * Managed Streaming for Kafka Update: Amazon MSK has introduced new versions of ListClusterOperations and DescribeClusterOperation APIs. These v2 APIs provide information and insights into the ongoing operations of both MSK Provisioned and MSK Serverless clusters. * Amazon CloudFront Update: Add a new JavaScript runtime version for CloudFront Functions. * Amazon Pinpoint Update: Added support for sending push notifications using the FCM v1 API with json credentials. Amazon Pinpoint customers can now deliver messages to Android devices using both FCM v1 API and the legacy FCM/GCM API * Amazon Connect Service Update: This release adds support for new number types. * Amazon CloudWatch Application Insights Update: This release enable customer to add/remove/update more than one workload for a component * AWS CloudFormation Update: This SDK release is for the feature launch of AWS CloudFormation RetainExceptOnCreate. It adds a new parameter retainExceptOnCreate in the following APIs: CreateStack, UpdateStack, RollbackStack, ExecuteChangeSet. * Updated endpoints.json and partitions.json. * Release 2.20.115. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.116-SNAPSHOT * AWS Amplify UI Builder Update: Amplify Studio releases GraphQL support for codegen job action. * Elastic Disaster Recovery Service Update: Add support for in-aws right sizing * AWS CodeStar connections Update: New integration with the Gitlab provider type. * AWS Clean Rooms Service Update: This release introduces custom SQL queries - an expanded set of SQL you can run. 
This release adds analysis templates, a new resource for storing pre-defined custom SQL queries ahead of time. This release also adds the Custom analysis rule, which lets you approve analysis templates for querying. * Amazon Omics Update: Add CreationType filter for ListReadSets * Inspector2 Update: This release adds 1 new API: BatchGetFindingDetails to retrieve enhanced vulnerability intelligence details for findings. * Amazon Relational Database Service Update: This release adds support for Aurora MySQL local write forwarding, which allows for forwarding of write operations from reader DB instances to the writer DB instance. * Amazon Lookout for Equipment Update: This release includes new import resource, model versioning and resource policy features. * Amazon EventBridge Scheduler Update: This release introduces automatic deletion of schedules in EventBridge Scheduler. If configured, EventBridge Scheduler automatically deletes a schedule after the schedule has completed its last invocation. * Amazon Route 53 Update: Amazon Route 53 now supports the Israel (Tel Aviv) Region (il-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. * Auto Scaling Update: You can now configure an instance refresh to set its status to 'failed' when it detects that a specified CloudWatch alarm has gone into the ALARM state. You can also choose to roll back the instance refresh automatically when the alarm threshold is met. * Updated endpoints.json and partitions.json. * Release 2.20.116. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.117-SNAPSHOT * Amazon SageMaker Service Update: Add Stairs TrafficPattern and FlatInvocations to RecommendationJobStoppingConditions * AWS Batch Update: This release adds support for price capacity optimized allocation strategy for Spot Instances. 
* Amazon CloudWatch Internet Monitor Update: This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to set custom thresholds, for performance and availability drops, for impact limited to a single city-network to trigger creation of a health event. * Amazon Relational Database Service Update: Added support for deleted clusters PiTR. * AWS Database Migration Service Update: Adding new API describe-engine-versions which provides information about the lifecycle of a replication instance's version. * AWS Elemental MediaLive Update: AWS Elemental Link devices now report their Availability Zone. Link devices now support the ability to change their Availability Zone. * Amazon Polly Update: Amazon Polly adds new French Belgian voice - Isabelle. Isabelle is available as Neural voice only. * Updated endpoints.json and partitions.json. * Release 2.20.117. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.118-SNAPSHOT * AWS Resilience Hub Update: Drift Detection capability added when applications policy has moved from a meet to breach state. Customers will be able to exclude operational recommendations and receive credit in their resilience score. Customers can now add ARH permissions to an existing or new role. * Amazon Cognito Identity Provider Update: New feature that logs Cognito user pool error messages to CloudWatch logs. * AWS Glue Update: This release includes additional Glue Streaming KAKFA SASL property types. * Amazon SageMaker Service Update: SageMaker Inference Recommender introduces a new API GetScalingConfigurationRecommendation to recommend auto scaling policies based on completed Inference Recommender jobs. * AWS Budgets Update: As part of CAE tagging integration we need to update our budget names regex filter to prevent customers from using "/action/" in their budget names. * Updated endpoints.json and partitions.json. * Release 2.20.118. 
Updated CHANGELOG.md, README.md and all pom.xml. * Fixed an issue in ChecksumCalculatingAsyncRequestBody where the posit… (#4244) * Fixed an issue in ChecksumCalculatingAsyncRequestBody where the position of the ByteBuffer was not honored. * Fix checkstyle * rename methods and variables * Add javadocs * Update to next snapshot version: 2.20.119-SNAPSHOT * Add Expect 100-continue for UploadPartRequest (#4252) * Add Expect 100-continue for UploadPartRequest * Fix typo * Bump crt to 0.24.0 (#4256) * Amazon SageMaker Service Update: Amazon SageMaker now supports running training jobs on p5.48xlarge instance types. * Amazon Elastic Compute Cloud Update: This release adds new parameter isPrimaryIPv6 to allow assigning an IPv6 address as a primary IPv6 address to a network interface which cannot be changed to give equivalent functionality available for network interfaces with primary IPv4 address. * Auto Scaling Update: Documentation changes related to Amazon EC2 Auto Scaling APIs. * AWS Database Migration Service Update: The release makes public API for DMS Schema Conversion feature. * AWS Cloud9 Update: Updated the deprecation date for Amazon Linux. Doc only update. * Release 2.20.119. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.120-SNAPSHOT * AWS Certificate Manager Private Certificate Authority Update: Documentation correction for AWS Private CA * Amazon Connect Service Update: Added a new API UpdateRoutingProfileAgentAvailabilityTimer to update agent availability timer of a routing profile. * Amazon SageMaker Service Update: Including DataCaptureConfig key in the Amazon Sagemaker Search's transform job object * AWS DataSync Update: Display cloud storage used capacity at a cluster level. * Amazon EC2 Container Service Update: This is a documentation update to address various tickets. * Updated endpoints.json and partitions.json. * Release 2.20.120. Updated CHANGELOG.md, README.md and all pom.xml. 
* Update to next snapshot version: 2.20.121-SNAPSHOT * Java based S3 Multipart Client (#4254) * Implement multipart upload in Java-based S3 async client (#4052) * Implement multipart upload in Java-based S3 async client Co-authored-by: Matthew Miller * Iterate SdkFields to convert requests (#4177) * Iterate SdkFields to convert requests * Fix flaky test * Rename convertion utils class * Fix null content length in SplittingPublisher (#4173) * Implement multipart copy in Java-based S3 async client (#4189) * Create split method in AsyncRequestBody to return SplittingPublisher (#4188) * Create split method in AsyncRequestBody to return SplittingPublisher * Fix Javadoc and build * Add more tests with ByteArrayAsyncRequestBody (#4214) * Handle null response metadata (#4215) * Handle null response metadata * Fix build * Support streaming with unknown content length (#4226) * Support uploading with unknown content length * Refactoring * Create a configuration class for SdkPublisher#split (#4236) * S3 Multipart API implementation (#4235) * Multipart API fix merge conflicts * getObject(...) throw UnsupportedOperationException * Use user agent for all requests in MultipartS3Client * MultipartS3AsyncClient javadoc + API_NAME private * use `maximumMemoryUsageInBytes` * fix problem with UserAgent, cleanup * move contextParam keys to S3AsyncClientDecorator * javadoc * more javadoc * Use 4x part size as default apiCallBufferSize * Fix test * Guard against re-subscription in SplittingPublisher (#4253) * guard against re-subscription in SplittingPublisher * fix checkstyle * Error msg * Fix a race condition where the third upload part request was sent before the second one (#4260) --------- Co-authored-by: Zoe Wang <33073555+zoewangg@users.noreply.github.com> * Amazon Interactive Video Service RealTime Update: Add QUOTA_EXCEEDED and PUBLISHER_NOT_FOUND to EventErrorCode for stage health events. 
* Amazon Kinesis Video Streams Update: This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature. * Amazon Rekognition Update: This release adds code snippets for Amazon Rekognition Custom Labels. * Amazon Kinesis Video Streams Archived Media Update: This release enables minimum of Images SamplingInterval to be as low as 200 milliseconds in Kinesis Video Stream Image feature. * Amazon Detective Update: Updated the email validation regex to be in line with the TLD name specifications. * Updated endpoints.json and partitions.json. * Release 2.20.121. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.122-SNAPSHOT * Update to next snapshot version: 2.20.122-SNAPSHOT (#4275) * Update to next snapshot version: 2.20.122-SNAPSHOT * Revert previous version number * AWS Service Catalog Update: Introduce support for HashiCorp Terraform Cloud in Service Catalog by adding TERRAFORM_CLOUD product type in CreateProduct and CreateProvisioningArtifact API. * AWS Backup Update: This release introduces a new logically air-gapped vault (Preview) in AWS Backup that stores immutable backup copies, which are locked by default and isolated with encryption using AWS owned keys. Logically air-gapped vault (Preview) allows secure recovery of application data across accounts. * Amazon ElastiCache Update: Added support for cluster mode in online migration and test migration API * Updated endpoints.json and partitions.json. * Release 2.20.122. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.123-SNAPSHOT * Add default methods to AwsServiceClientConfiguration.Builder and SdkServiceClientConfiguration.Builder. (#4263) This allows older client versions to still compile with newer runtime versions. 
* Expose thresholdSizeInBytes in AWS CRT-based S3 client (#4282) * AWS Global Accelerator Update: Documentation update for dualstack EC2 endpoint support * Amazon FSx Update: For FSx for Lustre, add new data repository task type, RELEASE_DATA_FROM_FILESYSTEM, to release files that have been archived to S3. For FSx for Windows, enable support for configuring and updating SSD IOPS, and for updating storage type. For FSx for OpenZFS, add new deployment type, MULTI_AZ_1. * Amazon Chime SDK Voice Update: Updating CreatePhoneNumberOrder, UpdatePhoneNumber and BatchUpdatePhoneNumbers APIs, adding phone number name * Amazon GuardDuty Update: Added autoEnable ALL to UpdateOrganizationConfiguration and DescribeOrganizationConfiguration APIs. * Amazon SageMaker Service Update: This release adds support for cross account access for SageMaker Model Cards through AWS RAM. * Updated endpoints.json and partitions.json. * Release 2.20.123. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.124-SNAPSHOT * Fix immutableCopyOf bug (#4266) * Set limit when cloning ByteBuffer * Add changelog * Add L-Applin and breader124 to the all-contributors hall (#4276) * Fix for Issue [#4156](https://github.com/aws/aws-sdk-java-v2/issues/4156) : Single quotes in toJson conversions for EnhancedDocuments are no longer being escaped. (#4277) * Amazon Connect Service Update: This release adds APIs to provision agents that are global / available in multiple AWS regions and distribute them across these regions by percentage. * AWS Secrets Manager Update: Add additional InvalidRequestException to list of possible exceptions for ListSecret. * AWS CloudTrail Update: Documentation updates for CloudTrail. 
* AWS Transfer Family Update: Documentation updates for AWS Transfer Family * Elastic Load Balancing Update: This release enables configuring security groups for Network Load Balancers * Amazon Omics Update: This release adds instanceType to GetRunTask & ListRunTasks responses. * Updated endpoints.json and partitions.json. * Release 2.20.124. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.125-SNAPSHOT * AmplifyBackend Update: Adds sensitive trait to required input shapes. * AWS Config Update: Updated ResourceType enum with new resource types onboarded by AWS Config in July 2023. * Amazon Elastic Compute Cloud Update: Amazon EC2 P5 instances, powered by the latest NVIDIA H100 Tensor Core GPUs, deliver the highest performance in EC2 for deep learning (DL) and HPC applications. M7i-flex and M7i instances are next-generation general purpose instances powered by custom 4th Generation Intel Xeon Scalable processors. * Amazon Simple Email Service Update: Doc only updates to include: 1) Clarified which part of an email address where it's okay to have Punycode when it contains non-ASCII characters for the SendRawEmail action and other actions where this is applicable. 2) Updated S3Action description with new MB max bucket size from 30 to 40. * Amazon Simple Workflow Service Update: This release adds new API parameters to override workflow task list for workflow executions. * Amazon QuickSight Update: New Authentication method for Account subscription - IAM Identity Center. Hierarchy layout support, default column width support and related style properties for pivot table visuals. Non-additive topic field aggregations for Topic API * Updated endpoints.json and partitions.json. * Release 2.20.125. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.126-SNAPSHOT * AWS Elemental MediaPackage Update: Fix SDK logging of certain fields. 
* Amazon Omics Update: This release provides support for annotation store versioning and cross account sharing for Omics Analytics * AWS Transfer Family Update: Documentation updates for AWS Transfer Family * Updated endpoints.json and partitions.json. * Release 2.20.126. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.127-SNAPSHOT * S3 Benchmarks - support java-based multipart client (#4288) S3 Benchmarks - support java-based multipart client * Amazon Route 53 Domains Update: Provide explanation if CheckDomainTransferability return false. Provide requestId if a request is already submitted. Add sensitive protection for customer information * AWS Glue Update: AWS Glue Crawlers can now accept SerDe overrides from a custom csv classifier. The two SerDe options are LazySimpleSerDe and OpenCSVSerDe. In case, the user wants crawler to do the selection, "None" can be selected for this purpose. * Amazon SageMaker Service Update: SageMaker Inference Recommender now provides SupportedResponseMIMETypes from DescribeInferenceRecommendationsJob response * AWS Performance Insights Update: AWS Performance Insights for Amazon RDS is launching Performance Analysis On Demand, a new feature that allows you to analyze database performance metrics and find out the performance issues. You can now use SDK to create, list, get, delete, and manage tags of performance analysis reports. * Amazon Elastic Compute Cloud Update: Documentation updates for Elastic Compute Cloud (EC2). * Amazon Chime SDK Meetings Update: Updated API documentation to include additional exceptions. * Updated endpoints.json and partitions.json. * Release 2.20.127. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.128-SNAPSHOT * Fixed the issue where thresholdInBytes is not the same as minimalPart… (#4289) * Fixed the issue where thresholdInBytes is not the same as minimalPartSizeInBytes if a custom minimalPartSizeInBytes is provided. 
* Address feedback * Add more test * Update LaunchChangelog.md to add IAM Policy Builder (#4301) * Add SERVICE_ENDPOINT metric (#4307) * Implement SERVICE_ENDPOINT core metric * Create CollectionStages for SERVICE_ENDPOINT * Remove ServiceEndpointMetricCollectionStages since they're unnecessary * Move collection of SERVICE_ENDPOINT metric Simplify by collecting in the API Call metric collection stages where other request metrics are already being collected. * Remove unused import * Update changelog entry * Fix test * Remove unused class --------- Co-authored-by: David Negrete * Amazon CloudWatch Update: Doc-only update to incorporate several doc bug fixes * Amazon Lex Model Building V2 Update: This release updates the Custom Vocabulary Weight field to support a value of 0. * Release 2.20.128. Updated CHANGELOG.md, README.md and all pom.xml. * Ensure onNext will be called even if publishing empty content and onC… (#4290) * Ensure onNext will be called even if publishing empty content and onComplete is called directly * Adding changelog and removing unnecessary override * Update to next snapshot version: 2.20.129-SNAPSHOT * Fix javadocs links - consumer request objects (#4310) * Fix javadocs links - consumer request objects * Use getShapeName instead of getC2jName * Use fully qualified name * Update codegen test classes * changing terminology to more inclusive terms (#4115) * Amazon Elastic Compute Cloud Update: Adds support for SubnetConfigurations to allow users to select their own IPv4 and IPv6 addresses for Interface VPC endpoints * Amazon GameLift Update: Amazon GameLift updates its instance types support. * Updated endpoints.json and partitions.json. * Release 2.20.129. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.130-SNAPSHOT * AWS SecurityHub Update: Added Inspector Lambda code Vulnerability section to ASFF, including GeneratorDetails, EpssScore, ExploitAvailable, and CodeVulnerabilities. 
* AWS CodeCommit Update: Add new ListFileCommitHistory operation to retrieve commits which introduced changes to a specific file. * Updated endpoints.json and partitions.json. * Release 2.20.130. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.131-SNAPSHOT * AWS Cloud9 Update: Doc only update to add Ubuntu 22.04 as an Image ID option for Cloud9 * Amazon Route 53 Domains Update: Fixed typos in description fields * Amazon Elastic Compute Cloud Update: The DeleteKeyPair API has been updated to return the keyPairId when an existing key pair is deleted. * Amazon Relational Database Service Update: Adding support for RDS Aurora Global Database Unplanned Failover * FinSpace User Environment Management service Update: Allow customers to manage outbound traffic from their Kx Environment when attaching a transit gateway by providing network acl entries. Allow the customer to choose how they want to update the databases on a cluster allowing updates to possibly be faster than usual. * Updated endpoints.json and partitions.json. * Release 2.20.131. Updated CHANGELOG.md, README.md and all pom.xml. * Corrected a minor JavaDoc typo in the DynamoDbBean class. 
(#4325) * Remove unused address interceptor (#4316) * Bump reactive-streams from 1.0.3 to 1.0.4 (#4322) Signed-off-by: Chad Wilson * Update to next snapshot version: 2.20.132-SNAPSHOT * Modify ArnResource to handle more variants of resources (#4317) * Changes default behavior for DynamoDb Enhanced atomic counter extension (#4314) * Changes the default behavior for DynamoDb Enhanced atomic counter extension to filter out any counter attributes in the item map if present * docs: add chadlwilson as a contributor for code (#4330) * docs: update README.md [skip ci] * docs: update .all-contributorsrc [skip ci] --------- Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> * Amazon Verified Permissions Update: Documentation updates for Amazon Verified Permissions. Increases max results per page for ListPolicyStores, ListPolicies, and ListPolicyTemplates APIs from 20 to 50. * AWS Cost Explorer Service Update: This release adds the LastUpdatedDate and LastUsedDate timestamps to help you manage your cost allocation tags. * Amazon Relational Database Service Update: Adding parameters to CreateCustomDbEngineVersion reserved for future use. * AWS Global Accelerator Update: Global Accelerator now supports Client Ip Preservation for Network Load Balancer endpoints. * Updated endpoints.json and partitions.json. * Release 2.20.132. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.133-SNAPSHOT * Amazon API Gateway Update: This release adds RootResourceId to GetRestApi response. * Amazon Polly Update: Amazon Polly adds 1 new voice - Zayd (ar-AE) * Amazon Elastic Compute Cloud Update: Marking fields as sensitive on BundleTask and GetPasswordData * Updated endpoints.json and partitions.json. * Release 2.20.133. Updated CHANGELOG.md, README.md and all pom.xml. 
* Update to next snapshot version: 2.20.134-SNAPSHOT * Add support for presigned DeleteObject (#4313) * Add support for presigned DeleteObject * Review comments - Add proper annotations - Add equals and hashCode * Add auth-crt to bom (#4338) * AWS Elemental MediaLive Update: MediaLive now supports passthrough of KLV data to a HLS output group with a TS container. MediaLive now supports setting an attenuation mode for AC3 audio when the coding mode is 3/2 LFE. MediaLive now supports specifying whether to include filler NAL units in RTMP output group settings. * Amazon Relational Database Service Update: This release updates the supported versions for Percona XtraBackup in Aurora MySQL. * AWS MediaTailor Update: Adds new source location AUTODETECT_SIGV4 access type. * Amazon QuickSight Update: Excel support in Snapshot Export APIs. Removed Required trait for some insight Computations. Namespace-shared Folders support. Global Filters support. Table pin Column support. * AWS S3 Control Update: Updates to endpoint ruleset tests to address Smithy validation issues and standardize the capitalization of DualStack. * AWS Elemental MediaConvert Update: This release includes additional audio channel tags in Quicktime outputs, support for film grain synthesis for AV1 outputs, ability to create audio-only FLAC outputs, and ability to specify Amazon S3 destination storage class. * Amazon Simple Storage Service Update: Updates to endpoint ruleset tests to address Smithy validation issues. * AWS Glue Update: Added API attributes that help in the monitoring of sessions. * Amazon Verified Permissions Update: Documentation updates for Amazon Verified Permissions. * Amazon Elastic Compute Cloud Update: Amazon EC2 M7a instances, powered by 4th generation AMD EPYC processors, deliver up to 50% higher performance compared to M6a instances. Amazon EC2 Hpc7a instances, powered by 4th Gen AMD EPYC processors, deliver up to 2.5x better performance compared to Amazon EC2 Hpc6a instances. 
* Updated endpoints.json and partitions.json. * Release 2.20.134. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.135-SNAPSHOT * fix IamPolicy code example docs. (#4324) Co-authored-by: Debora N. Ito <476307+debora-ito@users.noreply.github.com> * Add option to output all results to disk (#4344) * docs: add ManishDait as a contributor for doc (#4343) * docs: update README.md [skip ci] * docs: update .all-contributorsrc [skip ci] --------- Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> * Amazon Detective Update: Added protections to interacting with fields containing customer information. * AWS CloudTrail Update: Add ThrottlingException with error code 429 to handle CloudTrail Delegated Admin request rate exceeded on organization resources. * Amazon CloudWatch Update: Doc-only update to get doc bug fixes into the SDK docs * Updated endpoints.json and partitions.json. * Release 2.20.135. Updated CHANGELOG.md, README.md and all pom.xml. * Revert "Modify ArnResource to handle more variants of resources (#4317)" This reverts commit 0a37e9f4f58616c9e81da6c3a78c7bcb423c9a57. * Update to next snapshot version: 2.20.136-SNAPSHOT * Revert "Modify ArnResource to handle more variants of resources (#4317)" (#4349) This reverts commit 0a37e9f4f58616c9e81da6c3a78c7bcb423c9a57. * Use zoned date time in benchmark params (#4351) This is useful when the results are processed across different systems. * AWS Organizations Update: Documentation updates for permissions and links. * AWS Backup Update: Add support for customizing time zone for backup window in backup plan rules. * Amazon WorkSpaces Web Update: WorkSpaces Web now enables Admins to configure which cookies are synchronized from an end-user's local browser to the in-session browser. In conjunction with a browser extension, this feature enables enhanced Single-Sign On capability by reducing the number of times an end-user has to authenticate. 
* Service Quotas Update: Service Quotas now supports viewing the applied quota value and requesting a quota increase for a specific resource in an AWS account. * AWS Compute Optimizer Update: This release enables AWS Compute Optimizer to analyze and generate licensing optimization recommendations for sql server running on EC2 instances. * Amazon Security Lake Update: Remove incorrect regex enforcement on pagination tokens. * Updated endpoints.json and partitions.json. * Release 2.20.136. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.137-SNAPSHOT * Optimize file uploads by reading from file in parallel (#4331) * Optimize file upload by reading from file in different offsets in parallel * Override split in FileAsyncRequestBody * Address feedback and fix the issue where doAfterOnComplete is invoked * address feedback * Fix issue reported by sonarcloud * Upgrade jacoco to 0.8.10 (#4360) * Upgrade jacoco to 0.8.10 * Fix test coverage reporting with java 17 * Amazon Simple Email Service Update: Adds support for the new Export and Message Insights features: create, get, list and cancel export jobs; get message insights. * Amazon Omics Update: Add RetentionMode support for Runs. * Amazon Cognito Identity Provider Update: Added API example requests and responses for several operations. Fixed the validation regex for user pools Identity Provider name. * Amazon FSx Update: Documentation updates for project quotas. * Release 2.20.137. Updated CHANGELOG.md, README.md and all pom.xml. * Update to next snapshot version: 2.20.138-SNAPSHOT --------- Signed-off-by: Chad Wilson Co-authored-by: AWS <> Co-authored-by: aws-sdk-java-automation <43143862+aws-sdk-java-automation@users.noreply.github.com> Co-authored-by: Dongie Agnir <261310+dagnir@users.noreply.github.com> Co-authored-by: Zoe Wang <33073555+zoewangg@users.noreply.github.com> Co-authored-by: Olivier L Applin Co-authored-by: Matthew Miller Co-authored-by: Debora N. 
Ito <476307+debora-ito@users.noreply.github.com> Co-authored-by: John Viegas <70235430+joviegas@users.noreply.github.com> Co-authored-by: David Negrete Co-authored-by: Anna-Karin Salander Co-authored-by: Chad Wilson Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> Co-authored-by: Manish Dait <90558243+ManishDait@users.noreply.github.com> --- .all-contributorsrc | 18 + .changes/2.20.126.json | 30 + .changes/2.20.127.json | 48 + .changes/2.20.128.json | 24 + .changes/2.20.129.json | 30 + .changes/2.20.130.json | 24 + .changes/2.20.131.json | 42 + .changes/2.20.132.json | 42 + .changes/2.20.133.json | 30 + .changes/2.20.134.json | 78 + .changes/2.20.135.json | 30 + .changes/2.20.136.json | 48 + .changes/2.20.137.json | 30 + CHANGELOG.md | 269 +- README.md | 12 +- archetypes/archetype-app-quickstart/pom.xml | 2 +- archetypes/archetype-lambda/pom.xml | 2 +- archetypes/archetype-tools/pom.xml | 2 +- archetypes/pom.xml | 2 +- aws-sdk-java/pom.xml | 2 +- bom-internal/pom.xml | 2 +- bom/pom.xml | 7 +- bundle/pom.xml | 2 +- codegen-lite-maven-plugin/pom.xml | 2 +- codegen-lite/pom.xml | 2 +- codegen-maven-plugin/pom.xml | 2 +- codegen/pom.xml | 2 +- .../codegen/IntermediateModelBuilder.java | 10 +- .../codegen/docs/OperationDocProvider.java | 6 +- .../customization/CustomizationConfig.java | 23 + .../poet/client/c2j/json/customization.config | 2 +- .../client/c2j/rest-json/customization.config | 2 +- .../test-json-async-client-interface.java | 71 +- .../client/test-json-client-interface.java | 68 +- .../codegen/poet/model/customization.config | 2 +- .../customization.config | 2 +- .../model/xmlnamespace/customization.config | 2 +- .../poet/paginators/customization.config | 2 +- .../poet/transform/customization.config | 2 +- core/annotations/pom.xml | 2 +- core/arns/pom.xml | 2 +- core/auth-crt/pom.xml | 2 +- core/auth/pom.xml | 2 +- core/aws-core/pom.xml | 2 +- core/crt-core/pom.xml | 2 +- core/endpoints-spi/pom.xml | 2 +- 
core/imds/pom.xml | 2 +- core/json-utils/pom.xml | 2 +- core/metrics-spi/pom.xml | 2 +- core/pom.xml | 2 +- core/profiles/pom.xml | 2 +- core/protocols/aws-cbor-protocol/pom.xml | 2 +- core/protocols/aws-json-protocol/pom.xml | 2 +- core/protocols/aws-query-protocol/pom.xml | 2 +- core/protocols/aws-xml-protocol/pom.xml | 2 +- core/protocols/pom.xml | 2 +- core/protocols/protocol-core/pom.xml | 2 +- core/regions/pom.xml | 2 +- .../regions/internal/region/endpoints.json | 187 +- core/sdk-core/pom.xml | 2 +- .../core/FileRequestBodyConfiguration.java | 209 + .../awssdk/core/async/AsyncRequestBody.java | 51 +- .../AsyncRequestBodySplitConfiguration.java | 10 + .../ChecksumCalculatingAsyncRequestBody.java | 42 +- .../core/internal/async/ChunkBuffer.java | 2 + .../internal/async/FileAsyncRequestBody.java | 98 +- .../FileAsyncRequestBodySplitHelper.java | 185 + .../internal/async/SplittingPublisher.java | 48 +- .../stages/ApiCallMetricCollectionStage.java | 2 + .../AsyncApiCallMetricCollectionStage.java | 2 + .../pipeline/stages/HttpChecksumStage.java | 6 +- ...AwsUnsignedChunkedEncodingInputStream.java | 43 - .../core/internal/util/ChunkContentUtils.java | 61 +- .../core/internal/util/MetricUtils.java | 21 + .../awssdk/core/metrics/CoreMetric.java | 7 + .../RetryOnExceptionsCondition.java | 8 +- .../amazon/awssdk/core/util/SdkUserAgent.java | 4 +- .../FileRequestBodyConfigurationTest.java | 73 + .../AwsChunkedEncodingInputStreamTest.java | 9 +- ...ecksumCalculatingAsyncRequestBodyTest.java | 175 +- .../FileAsyncRequestBodySplitHelperTest.java | 96 + .../async/FileAsyncRequestBodyTest.java | 86 + .../async/SplittingPublisherTest.java | 53 +- .../async/SplittingPublisherTestUtils.java | 70 + docs/LaunchChangelog.md | 1 + http-client-spi/pom.xml | 2 +- http-clients/apache-client/pom.xml | 2 +- http-clients/aws-crt-client/pom.xml | 2 +- http-clients/netty-nio-client/pom.xml | 2 +- http-clients/pom.xml | 2 +- http-clients/url-connection-client/pom.xml | 2 +- 
.../cloudwatch-metric-publisher/pom.xml | 2 +- metric-publishers/pom.xml | 2 +- pom.xml | 11 +- release-scripts/pom.xml | 2 +- services-custom/dynamodb-enhanced/pom.xml | 2 +- .../extensions/AtomicCounterExtension.java | 40 +- .../mapper/annotations/DynamoDbBean.java | 2 +- .../AtomicCounterExtensionTest.java | 35 +- .../functionaltests/AtomicCounterTest.java | 28 +- services-custom/iam-policy-builder/pom.xml | 2 +- .../awssdk/policybuilder/iam/IamPolicy.java | 5 +- services-custom/pom.xml | 2 +- services-custom/s3-transfer-manager/pom.xml | 2 +- services/accessanalyzer/pom.xml | 2 +- services/account/pom.xml | 2 +- services/acm/pom.xml | 2 +- services/acmpca/pom.xml | 2 +- services/alexaforbusiness/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/amp/pom.xml | 2 +- services/amplify/pom.xml | 2 +- services/amplifybackend/pom.xml | 2 +- services/amplifyuibuilder/pom.xml | 2 +- services/apigateway/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 340 +- .../codegen-resources/endpoint-tests.json | 1509 +------ .../codegen-resources/service-2.json | 70 +- services/apigatewaymanagementapi/pom.xml | 2 +- services/apigatewayv2/pom.xml | 2 +- services/appconfig/pom.xml | 2 +- services/appconfigdata/pom.xml | 2 +- services/appfabric/pom.xml | 2 +- services/appflow/pom.xml | 2 +- services/appintegrations/pom.xml | 2 +- services/applicationautoscaling/pom.xml | 2 +- services/applicationcostprofiler/pom.xml | 2 +- services/applicationdiscovery/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/applicationinsights/pom.xml | 2 +- services/appmesh/pom.xml | 2 +- services/apprunner/pom.xml | 2 +- services/appstream/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/appsync/pom.xml | 2 +- services/arczonalshift/pom.xml | 2 +- services/athena/pom.xml | 2 +- services/auditmanager/pom.xml | 2 +- services/autoscaling/pom.xml | 2 +- 
services/autoscalingplans/pom.xml | 2 +- services/backup/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 13 +- services/backupgateway/pom.xml | 2 +- services/backupstorage/pom.xml | 2 +- services/batch/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/billingconductor/pom.xml | 2 +- services/braket/pom.xml | 2 +- services/budgets/pom.xml | 2 +- services/chime/pom.xml | 2 +- services/chimesdkidentity/pom.xml | 2 +- services/chimesdkmediapipelines/pom.xml | 2 +- services/chimesdkmeetings/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 29 +- services/chimesdkmessaging/pom.xml | 2 +- services/chimesdkvoice/pom.xml | 2 +- services/cleanrooms/pom.xml | 2 +- services/cloud9/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- services/cloudcontrol/pom.xml | 2 +- services/clouddirectory/pom.xml | 2 +- services/cloudformation/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/cloudfront/pom.xml | 2 +- services/cloudhsm/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/cloudhsmv2/pom.xml | 2 +- services/cloudsearch/pom.xml | 2 +- services/cloudsearchdomain/pom.xml | 2 +- services/cloudtrail/pom.xml | 2 +- .../codegen-resources/service-2.json | 21 +- services/cloudtraildata/pom.xml | 2 +- services/cloudwatch/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 362 +- .../codegen-resources/service-2.json | 14 +- services/cloudwatchevents/pom.xml | 2 +- services/cloudwatchlogs/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/codeartifact/pom.xml | 2 +- services/codebuild/pom.xml | 2 +- services/codecatalyst/pom.xml | 2 +- services/codecommit/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- 
.../codegen-resources/endpoint-rule-set.json | 341 +- .../codegen-resources/endpoint-tests.json | 1322 ++---- .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 178 +- services/codedeploy/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/codeguruprofiler/pom.xml | 2 +- services/codegurureviewer/pom.xml | 2 +- services/codegurusecurity/pom.xml | 2 +- services/codepipeline/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/codestar/pom.xml | 2 +- services/codestarconnections/pom.xml | 2 +- services/codestarnotifications/pom.xml | 2 +- services/cognitoidentity/pom.xml | 2 +- services/cognitoidentityprovider/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 60 +- services/cognitosync/pom.xml | 2 +- services/comprehend/pom.xml | 2 +- services/comprehendmedical/pom.xml | 2 +- services/computeoptimizer/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 363 +- services/config/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/connect/pom.xml | 2 +- services/connectcampaigns/pom.xml | 2 +- services/connectcases/pom.xml | 2 +- services/connectcontactlens/pom.xml | 2 +- services/connectparticipant/pom.xml | 2 +- services/controltower/pom.xml | 2 +- services/costandusagereport/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/costexplorer/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 510 ++- .../codegen-resources/service-2.json | 10 +- services/customerprofiles/pom.xml | 2 +- services/databasemigration/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/databrew/pom.xml | 2 +- services/dataexchange/pom.xml | 2 +- services/datapipeline/pom.xml | 2 +- services/datasync/pom.xml | 2 +- services/dax/pom.xml 
| 2 +- services/detective/pom.xml | 2 +- .../codegen-resources/service-2.json | 6 +- services/devicefarm/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/devopsguru/pom.xml | 2 +- services/directconnect/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/directory/pom.xml | 2 +- services/dlm/pom.xml | 2 +- services/docdb/pom.xml | 2 +- services/docdbelastic/pom.xml | 2 +- services/drs/pom.xml | 2 +- services/dynamodb/pom.xml | 2 +- services/ebs/pom.xml | 2 +- services/ec2/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/service-2.json | 109 +- services/ec2instanceconnect/pom.xml | 2 +- services/ecr/pom.xml | 2 +- services/ecrpublic/pom.xml | 2 +- services/ecs/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/efs/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/eks/pom.xml | 2 +- services/elasticache/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/elasticbeanstalk/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/elasticinference/pom.xml | 2 +- services/elasticloadbalancing/pom.xml | 2 +- services/elasticloadbalancingv2/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/elasticsearch/pom.xml | 2 +- services/elastictranscoder/pom.xml | 2 +- services/emr/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/emrcontainers/pom.xml | 2 +- services/emrserverless/pom.xml | 2 +- services/entityresolution/pom.xml | 2 +- services/eventbridge/pom.xml | 2 +- services/evidently/pom.xml | 2 +- services/finspace/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 170 +- services/finspacedata/pom.xml | 2 +- services/firehose/pom.xml | 2 +- .../firehose/ServiceIntegrationTest.java | 132 - services/fis/pom.xml | 2 +- services/fms/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- 
services/forecast/pom.xml | 2 +- services/forecastquery/pom.xml | 2 +- services/frauddetector/pom.xml | 2 +- services/fsx/pom.xml | 2 +- .../codegen-resources/service-2.json | 50 +- services/gamelift/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 233 +- services/gamesparks/pom.xml | 2 +- services/glacier/pom.xml | 2 +- services/globalaccelerator/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/service-2.json | 4 +- services/glue/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/service-2.json | 48 + services/grafana/pom.xml | 2 +- services/greengrass/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/greengrassv2/pom.xml | 2 +- services/groundstation/pom.xml | 2 +- services/guardduty/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/health/pom.xml | 2 +- services/healthlake/pom.xml | 2 +- services/honeycode/pom.xml | 2 +- services/iam/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/identitystore/pom.xml | 2 +- services/imagebuilder/pom.xml | 2 +- services/inspector/pom.xml | 2 +- services/inspector2/pom.xml | 2 +- services/internetmonitor/pom.xml | 2 +- services/iot/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/iot1clickdevices/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/iot1clickprojects/pom.xml | 2 +- services/iotanalytics/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/iotdataplane/pom.xml | 2 +- services/iotdeviceadvisor/pom.xml | 2 +- services/iotevents/pom.xml | 2 +- services/ioteventsdata/pom.xml | 2 +- services/iotfleethub/pom.xml | 2 +- services/iotfleetwise/pom.xml | 2 +- services/iotjobsdataplane/pom.xml | 2 +- services/iotroborunner/pom.xml | 2 +- services/iotsecuretunneling/pom.xml | 2 +- 
services/iotsitewise/pom.xml | 2 +- services/iotthingsgraph/pom.xml | 2 +- services/iottwinmaker/pom.xml | 2 +- services/iotwireless/pom.xml | 2 +- services/ivs/pom.xml | 2 +- services/ivschat/pom.xml | 2 +- services/ivsrealtime/pom.xml | 2 +- services/kafka/pom.xml | 2 +- services/kafkaconnect/pom.xml | 2 +- services/kendra/pom.xml | 2 +- services/kendraranking/pom.xml | 2 +- services/keyspaces/pom.xml | 2 +- services/kinesis/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/kinesisanalytics/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/kinesisanalyticsv2/pom.xml | 2 +- services/kinesisvideo/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/kinesisvideoarchivedmedia/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/kinesisvideomedia/pom.xml | 2 +- services/kinesisvideosignaling/pom.xml | 2 +- services/kinesisvideowebrtcstorage/pom.xml | 2 +- services/kms/pom.xml | 2 +- services/lakeformation/pom.xml | 2 +- services/lambda/pom.xml | 2 +- services/lexmodelbuilding/pom.xml | 2 +- services/lexmodelsv2/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 2 +- services/lexruntime/pom.xml | 2 +- services/lexruntimev2/pom.xml | 2 +- services/licensemanager/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../licensemanagerlinuxsubscriptions/pom.xml | 2 +- .../licensemanagerusersubscriptions/pom.xml | 2 +- services/lightsail/pom.xml | 2 +- services/location/pom.xml | 2 +- services/lookoutequipment/pom.xml | 2 +- services/lookoutmetrics/pom.xml | 2 +- services/lookoutvision/pom.xml | 2 +- services/m2/pom.xml | 2 +- services/machinelearning/pom.xml | 2 +- services/macie/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/macie2/pom.xml | 2 +- services/managedblockchain/pom.xml | 2 +- services/managedblockchainquery/pom.xml | 2 +- services/marketplacecatalog/pom.xml | 2 +- 
services/marketplacecommerceanalytics/pom.xml | 2 +- services/marketplaceentitlement/pom.xml | 2 +- services/marketplacemetering/pom.xml | 2 +- services/mediaconnect/pom.xml | 2 +- services/mediaconvert/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 400 +- .../codegen-resources/service-2.json | 114 +- services/medialive/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 45 + services/mediapackage/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/endpoint-tests.json | 50 + .../codegen-resources/service-2.json | 8 +- services/mediapackagev2/pom.xml | 2 +- services/mediapackagevod/pom.xml | 2 +- services/mediastore/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/mediastoredata/pom.xml | 8 +- .../codegen-resources/customization.config | 2 +- services/mediatailor/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 5 +- services/medicalimaging/pom.xml | 2 +- services/memorydb/pom.xml | 2 +- services/mgn/pom.xml | 2 +- services/migrationhub/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/migrationhubconfig/pom.xml | 2 +- services/migrationhuborchestrator/pom.xml | 2 +- services/migrationhubrefactorspaces/pom.xml | 2 +- services/migrationhubstrategy/pom.xml | 2 +- services/mobile/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/mq/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/mturk/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/mwaa/pom.xml | 2 +- services/neptune/pom.xml | 2 +- services/networkfirewall/pom.xml | 2 +- services/networkmanager/pom.xml | 2 +- services/nimble/pom.xml | 2 +- services/oam/pom.xml | 2 +- services/omics/pom.xml | 2 +- 
.../codegen-resources/paginators-1.json | 12 + .../codegen-resources/service-2.json | 1039 ++++- .../codegen-resources/waiters-2.json | 48 + services/opensearch/pom.xml | 2 +- services/opensearchserverless/pom.xml | 2 +- services/opsworks/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/opsworkscm/pom.xml | 2 +- services/organizations/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 1295 ++---- .../codegen-resources/endpoint-tests.json | 148 +- .../codegen-resources/service-2.json | 86 +- services/osis/pom.xml | 2 +- services/outposts/pom.xml | 2 +- services/panorama/pom.xml | 2 +- services/paymentcryptography/pom.xml | 2 +- services/paymentcryptographydata/pom.xml | 2 +- services/personalize/pom.xml | 2 +- services/personalizeevents/pom.xml | 2 +- services/personalizeruntime/pom.xml | 2 +- services/pi/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/endpoint-tests.json | 50 + .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 636 ++- services/pinpoint/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/pinpointemail/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/pinpointsmsvoice/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/pinpointsmsvoicev2/pom.xml | 2 +- services/pipes/pom.xml | 2 +- services/polly/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 3 +- services/pom.xml | 2 +- services/pricing/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/privatenetworks/pom.xml | 2 +- services/proton/pom.xml | 2 +- services/qldb/pom.xml | 2 +- services/qldbsession/pom.xml | 2 +- services/quicksight/pom.xml | 2 +- .../codegen-resources/paginators-1.json | 30 + .../codegen-resources/service-2.json | 173 +- services/ram/pom.xml | 2 +- services/rbin/pom.xml | 2 +- services/rds/pom.xml | 2 +- 
.../codegen-resources/customization.config | 2 +- .../codegen-resources/paginators-1.json | 6 + .../codegen-resources/service-2.json | 105 +- services/rdsdata/pom.xml | 2 +- services/redshift/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/redshiftdata/pom.xml | 2 +- services/redshiftserverless/pom.xml | 2 +- services/rekognition/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/resiliencehub/pom.xml | 2 +- services/resourceexplorer2/pom.xml | 2 +- services/resourcegroups/pom.xml | 2 +- services/resourcegroupstaggingapi/pom.xml | 2 +- services/robomaker/pom.xml | 2 +- services/rolesanywhere/pom.xml | 2 +- services/route53/pom.xml | 2 +- services/route53domains/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 44 +- services/route53recoverycluster/pom.xml | 2 +- services/route53recoverycontrolconfig/pom.xml | 2 +- services/route53recoveryreadiness/pom.xml | 2 +- services/route53resolver/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/rum/pom.xml | 2 +- services/s3/pom.xml | 7 +- .../s3/S3PresignerIntegrationTest.java | 33 + .../MultipartConfigurationResolver.java | 53 + .../multipart/MultipartS3AsyncClient.java | 20 +- .../multipart/UploadObjectHelper.java | 16 +- .../UploadWithKnownContentLengthHelper.java | 4 +- .../internal/signing/DefaultS3Presigner.java | 19 + .../s3/multipart/MultipartConfiguration.java | 16 +- .../services/s3/presigner/S3Presigner.java | 47 + .../model/DeleteObjectPresignRequest.java | 138 + .../model/PresignedDeleteObjectRequest.java | 107 + .../codegen-resources/endpoint-tests.json | 235 +- .../awssdk/services/s3/S3PresignerTest.java | 120 +- .../MultipartConfigurationResolverTest.java | 83 + .../multipart/UploadObjectHelperTest.java | 26 +- services/s3control/pom.xml | 2 +- .../EndpointAddressInterceptor.java | 223 - 
.../codegen-resources/endpoint-rule-set.json | 3669 ++++++++--------- .../codegen-resources/endpoint-tests.json | 196 +- .../EndpointAddressInterceptorTest.java | 323 -- services/s3outposts/pom.xml | 2 +- services/sagemaker/pom.xml | 2 +- .../codegen-resources/service-2.json | 17 +- services/sagemakera2iruntime/pom.xml | 2 +- services/sagemakeredge/pom.xml | 2 +- services/sagemakerfeaturestoreruntime/pom.xml | 2 +- services/sagemakergeospatial/pom.xml | 2 +- services/sagemakermetrics/pom.xml | 2 +- services/sagemakerruntime/pom.xml | 2 +- services/savingsplans/pom.xml | 2 +- services/scheduler/pom.xml | 2 +- services/schemas/pom.xml | 2 +- services/secretsmanager/pom.xml | 2 +- services/securityhub/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 93 +- services/securitylake/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 167 +- .../codegen-resources/service-2.json | 3 +- .../serverlessapplicationrepository/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/servicecatalog/pom.xml | 2 +- services/servicecatalogappregistry/pom.xml | 2 +- services/servicediscovery/pom.xml | 2 +- services/servicequotas/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 343 +- .../codegen-resources/endpoint-tests.json | 1288 +----- .../codegen-resources/service-2.json | 235 +- services/ses/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/sesv2/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/paginators-1.json | 5 + .../codegen-resources/service-2.json | 645 ++- services/sfn/pom.xml | 2 +- services/shield/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/signer/pom.xml | 2 +- services/simspaceweaver/pom.xml | 2 +- services/sms/pom.xml | 2 +- services/snowball/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- 
services/snowdevicemanagement/pom.xml | 2 +- services/sns/pom.xml | 2 +- services/sqs/pom.xml | 2 +- services/ssm/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/ssmcontacts/pom.xml | 2 +- services/ssmincidents/pom.xml | 2 +- services/ssmsap/pom.xml | 2 +- services/sso/pom.xml | 2 +- services/ssoadmin/pom.xml | 2 +- services/ssooidc/pom.xml | 2 +- services/storagegateway/pom.xml | 2 +- services/sts/pom.xml | 2 +- services/support/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/supportapp/pom.xml | 2 +- services/swf/pom.xml | 2 +- services/synthetics/pom.xml | 2 +- services/textract/pom.xml | 2 +- services/timestreamquery/pom.xml | 2 +- services/timestreamwrite/pom.xml | 2 +- services/tnb/pom.xml | 2 +- services/transcribe/pom.xml | 2 +- services/transcribestreaming/pom.xml | 2 +- services/transfer/pom.xml | 2 +- .../codegen-resources/service-2.json | 4 +- services/translate/pom.xml | 2 +- services/verifiedpermissions/pom.xml | 2 +- .../codegen-resources/endpoint-tests.json | 161 +- .../codegen-resources/service-2.json | 97 +- services/voiceid/pom.xml | 2 +- services/vpclattice/pom.xml | 2 +- services/waf/pom.xml | 2 +- .../waf/customization.config | 2 +- .../wafregional/customization.config | 2 +- services/wafv2/pom.xml | 2 +- services/wellarchitected/pom.xml | 2 +- services/wisdom/pom.xml | 2 +- services/workdocs/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/worklink/pom.xml | 2 +- services/workmail/pom.xml | 2 +- services/workmailmessageflow/pom.xml | 2 +- services/workspaces/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- services/workspacesweb/pom.xml | 2 +- .../codegen-resources/endpoint-rule-set.json | 344 +- .../codegen-resources/service-2.json | 111 +- services/xray/pom.xml | 2 +- .../codegen-resources/customization.config | 2 +- test/auth-tests/pom.xml | 2 +- test/codegen-generated-classes-test/pom.xml | 2 +- .../services/metrics/CoreMetricsTest.java | 34 + 
.../async/BaseAsyncCoreMetricsTest.java | 2 + test/http-client-tests/pom.xml | 2 +- test/module-path-tests/pom.xml | 2 +- test/protocol-tests-core/pom.xml | 2 +- test/protocol-tests/pom.xml | 2 +- test/region-testing/pom.xml | 2 +- test/ruleset-testing-core/pom.xml | 2 +- test/s3-benchmarks/.scripts/benchmark | 2 +- .../.scripts/create_benchmark_files | 25 +- test/s3-benchmarks/README.md | 28 +- test/s3-benchmarks/pom.xml | 12 +- .../BaseJavaS3ClientBenchmark.java | 130 + .../BaseTransferManagerBenchmark.java | 22 +- .../awssdk/s3benchmarks/BenchmarkRunner.java | 43 +- .../JavaS3ClientCopyBenchmark.java | 44 + .../JavaS3ClientUploadBenchmark.java | 95 + .../TransferManagerBenchmark.java | 27 + .../TransferManagerBenchmarkConfig.java | 80 +- .../src/main/resources/log4j2.properties | 3 + test/sdk-benchmarks/pom.xml | 9 +- .../benchmark/BenchmarkResultProcessor.java | 24 +- .../awssdk/benchmark/BenchmarkRunner.java | 94 +- .../benchmark/stats/SdkBenchmarkParams.java | 27 +- .../utils/BenchmarkProcessorOutput.java | 44 + .../amazon/awssdk/benchmark/baseline.json | 526 ++- test/sdk-native-image-test/pom.xml | 2 +- test/service-test-utils/pom.xml | 2 +- test/stability-tests/pom.xml | 2 +- test/test-utils/pom.xml | 2 +- test/tests-coverage-reporting/pom.xml | 2 +- third-party/pom.xml | 2 +- third-party/third-party-jackson-core/pom.xml | 2 +- .../pom.xml | 2 +- utils/pom.xml | 2 +- .../amazon/awssdk/utils/Validate.java | 13 + .../amazon/awssdk/utils/ValidateTest.java | 14 + 661 files changed, 15698 insertions(+), 13753 deletions(-) create mode 100644 .changes/2.20.126.json create mode 100644 .changes/2.20.127.json create mode 100644 .changes/2.20.128.json create mode 100644 .changes/2.20.129.json create mode 100644 .changes/2.20.130.json create mode 100644 .changes/2.20.131.json create mode 100644 .changes/2.20.132.json create mode 100644 .changes/2.20.133.json create mode 100644 .changes/2.20.134.json create mode 100644 .changes/2.20.135.json create mode 100644 
.changes/2.20.136.json create mode 100644 .changes/2.20.137.json create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/FileRequestBodyConfiguration.java create mode 100644 core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelper.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/FileRequestBodyConfigurationTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelperTest.java create mode 100644 core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTestUtils.java delete mode 100644 services/firehose/src/it/java/software/amazon/awssdk/services/firehose/ServiceIntegrationTest.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolver.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/DeleteObjectPresignRequest.java create mode 100644 services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedDeleteObjectRequest.java create mode 100644 services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolverTest.java delete mode 100644 services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java delete mode 100644 services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java create mode 100644 test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseJavaS3ClientBenchmark.java create mode 100644 test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientCopyBenchmark.java create mode 100644 test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientUploadBenchmark.java create mode 100644 
test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/utils/BenchmarkProcessorOutput.java diff --git a/.all-contributorsrc b/.all-contributorsrc index dac08ca7d53..f17f54ab71d 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -841,6 +841,24 @@ "contributions": [ "code" ] + }, + { + "login": "chadlwilson", + "name": "Chad Wilson", + "avatar_url": "https://avatars.githubusercontent.com/u/29788154?v=4", + "profile": "https://www.buymeacoffee.com/chadwilson", + "contributions": [ + "code" + ] + }, + { + "login": "ManishDait", + "name": "Manish Dait", + "avatar_url": "https://avatars.githubusercontent.com/u/90558243?v=4", + "profile": "https://github.com/ManishDait", + "contributions": [ + "doc" + ] } ], "contributorsPerLine": 7, diff --git a/.changes/2.20.126.json b/.changes/2.20.126.json new file mode 100644 index 00000000000..7e7b7059522 --- /dev/null +++ b/.changes/2.20.126.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.126", + "date": "2023-08-14", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaPackage", + "contributor": "", + "description": "Fix SDK logging of certain fields." + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "Documentation updates for AWS Transfer Family" + }, + { + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "This release provides support for annotation store versioning and cross account sharing for Omics Analytics" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.127.json b/.changes/2.20.127.json new file mode 100644 index 00000000000..9f34e91bbfc --- /dev/null +++ b/.changes/2.20.127.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.127", + "date": "2023-08-15", + "entries": [ + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "AWS Glue Crawlers can now accept SerDe overrides from a custom csv classifier. The two SerDe options are LazySimpleSerDe and OpenCSVSerDe. In case, the user wants crawler to do the selection, \"None\" can be selected for this purpose." + }, + { + "type": "feature", + "category": "AWS Performance Insights", + "contributor": "", + "description": "AWS Performance Insights for Amazon RDS is launching Performance Analysis On Demand, a new feature that allows you to analyze database performance metrics and find out the performance issues. You can now use SDK to create, list, get, delete, and manage tags of performance analysis reports." + }, + { + "type": "feature", + "category": "Amazon Chime SDK Meetings", + "contributor": "", + "description": "Updated API documentation to include additional exceptions." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Documentation updates for Elastic Compute Cloud (EC2)." + }, + { + "type": "feature", + "category": "Amazon Route 53 Domains", + "contributor": "", + "description": "Provide explanation if CheckDomainTransferability return false. Provide requestId if a request is already submitted. 
Add sensitive protection for customer information" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "SageMaker Inference Recommender now provides SupportedResponseMIMETypes from DescribeInferenceRecommendationsJob response" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.128.json b/.changes/2.20.128.json new file mode 100644 index 00000000000..d8277286828 --- /dev/null +++ b/.changes/2.20.128.json @@ -0,0 +1,24 @@ +{ + "version": "2.20.128", + "date": "2023-08-16", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add support for the `SERVICE_ENDPOINT` metric. This metric represents the endpoint (scheme and authority) that the request was sent to." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "Doc-only update to incorporate several doc bug fixes" + }, + { + "type": "feature", + "category": "Amazon Lex Model Building V2", + "contributor": "", + "description": "This release updates the Custom Vocabulary Weight field to support a value of 0." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.129.json b/.changes/2.20.129.json new file mode 100644 index 00000000000..a3508927538 --- /dev/null +++ b/.changes/2.20.129.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.129", + "date": "2023-08-17", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Sends final checksum chunk and trailer when only onComplete() is called by upstream (empty content)" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Adds support for SubnetConfigurations to allow users to select their own IPv4 and IPv6 addresses for Interface VPC endpoints" + }, + { + "type": "feature", + "category": "Amazon GameLift", + "contributor": "", + "description": "Amazon GameLift updates its instance types support." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.130.json b/.changes/2.20.130.json new file mode 100644 index 00000000000..a8140fe8ea8 --- /dev/null +++ b/.changes/2.20.130.json @@ -0,0 +1,24 @@ +{ + "version": "2.20.130", + "date": "2023-08-18", + "entries": [ + { + "type": "feature", + "category": "AWS CodeCommit", + "contributor": "", + "description": "Add new ListFileCommitHistory operation to retrieve commits which introduced changes to a specific file." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Added Inspector Lambda code Vulnerability section to ASFF, including GeneratorDetails, EpssScore, ExploitAvailable, and CodeVulnerabilities." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.131.json b/.changes/2.20.131.json new file mode 100644 index 00000000000..64dbe75ffac --- /dev/null +++ b/.changes/2.20.131.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.131", + "date": "2023-08-21", + "entries": [ + { + "type": "feature", + "category": "AWS Cloud9", + "contributor": "", + "description": "Doc only update to add Ubuntu 22.04 as an Image ID option for Cloud9" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "The DeleteKeyPair API has been updated to return the keyPairId when an existing key pair is deleted." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adding support for RDS Aurora Global Database Unplanned Failover" + }, + { + "type": "feature", + "category": "Amazon Route 53 Domains", + "contributor": "", + "description": "Fixed typos in description fields" + }, + { + "type": "feature", + "category": "FinSpace User Environment Management service", + "contributor": "", + "description": "Allow customers to manage outbound traffic from their Kx Environment when attaching a transit gateway by providing network acl entries. Allow the customer to choose how they want to update the databases on a cluster allowing updates to possibly be faster than usual." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.132.json b/.changes/2.20.132.json new file mode 100644 index 00000000000..81e001df306 --- /dev/null +++ b/.changes/2.20.132.json @@ -0,0 +1,42 @@ +{ + "version": "2.20.132", + "date": "2023-08-22", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2 - DynamoDb Enhanced", + "contributor": "", + "description": "Changes the default behavior of the DynamoDb Enhanced atomic counter extension to automatically filter out any counter attributes in the item to be updated. This allows users to read and update items without DynamoDb collision errors." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "This release adds the LastUpdatedDate and LastUsedDate timestamps to help you manage your cost allocation tags." + }, + { + "type": "feature", + "category": "AWS Global Accelerator", + "contributor": "", + "description": "Global Accelerator now supports Client Ip Preservation for Network Load Balancer endpoints." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adding parameters to CreateCustomDbEngineVersion reserved for future use." + }, + { + "type": "feature", + "category": "Amazon Verified Permissions", + "contributor": "", + "description": "Documentation updates for Amazon Verified Permissions. Increases max results per page for ListPolicyStores, ListPolicies, and ListPolicyTemplates APIs from 20 to 50." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.20.133.json b/.changes/2.20.133.json new file mode 100644 index 00000000000..44b68082fe9 --- /dev/null +++ b/.changes/2.20.133.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.133", + "date": "2023-08-23", + "entries": [ + { + "type": "feature", + "category": "Amazon API Gateway", + "contributor": "", + "description": "This release adds RootResourceId to GetRestApi response." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Marking fields as sensitive on BundleTask and GetPasswordData" + }, + { + "type": "feature", + "category": "Amazon Polly", + "contributor": "", + "description": "Amazon Polly adds 1 new voice - Zayd (ar-AE)" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.134.json b/.changes/2.20.134.json new file mode 100644 index 00000000000..2f57f1afbed --- /dev/null +++ b/.changes/2.20.134.json @@ -0,0 +1,78 @@ +{ + "version": "2.20.134", + "date": "2023-08-24", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes additional audio channel tags in Quicktime outputs, support for film grain synthesis for AV1 outputs, ability to create audio-only FLAC outputs, and ability to specify Amazon S3 destination storage class." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "MediaLive now supports passthrough of KLV data to a HLS output group with a TS container. MediaLive now supports setting an attenuation mode for AC3 audio when the coding mode is 3/2 LFE. MediaLive now supports specifying whether to include filler NAL units in RTMP output group settings." 
+ }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Added API attributes that help in the monitoring of sessions." + }, + { + "type": "feature", + "category": "AWS MediaTailor", + "contributor": "", + "description": "Adds new source location AUTODETECT_SIGV4 access type." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "Updates to endpoint ruleset tests to address Smithy validation issues and standardize the capitalization of DualStack." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Amazon EC2 M7a instances, powered by 4th generation AMD EPYC processors, deliver up to 50% higher performance compared to M6a instances. Amazon EC2 Hpc7a instances, powered by 4th Gen AMD EPYC processors, deliver up to 2.5x better performance compared to Amazon EC2 Hpc6a instances." + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Excel support in Snapshot Export APIs. Removed Required trait for some insight Computations. Namespace-shared Folders support. Global Filters support. Table pin Column support." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release updates the supported versions for Percona XtraBackup in Aurora MySQL." + }, + { + "type": "feature", + "category": "Amazon S3", + "contributor": "", + "description": "Add support for presigned `DeleteObject` in `S3Presigner`." + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Updates to endpoint ruleset tests to address Smithy validation issues." + }, + { + "type": "feature", + "category": "Amazon Verified Permissions", + "contributor": "", + "description": "Documentation updates for Amazon Verified Permissions." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.135.json b/.changes/2.20.135.json new file mode 100644 index 00000000000..f3eb1ccfd4a --- /dev/null +++ b/.changes/2.20.135.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.135", + "date": "2023-08-25", + "entries": [ + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "Add ThrottlingException with error code 429 to handle CloudTrail Delegated Admin request rate exceeded on organization resources." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "Doc-only update to get doc bug fixes into the SDK docs" + }, + { + "type": "feature", + "category": "Amazon Detective", + "contributor": "", + "description": "Added protections to interacting with fields containing customer information." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.136.json b/.changes/2.20.136.json new file mode 100644 index 00000000000..8ab12fa0184 --- /dev/null +++ b/.changes/2.20.136.json @@ -0,0 +1,48 @@ +{ + "version": "2.20.136", + "date": "2023-08-28", + "entries": [ + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "Add support for customizing time zone for backup window in backup plan rules." + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "This release enables AWS Compute Optimizer to analyze and generate licensing optimization recommendations for sql server running on EC2 instances." 
+ }, + { + "type": "feature", + "category": "AWS Organizations", + "contributor": "", + "description": "Documentation updates for permissions and links." + }, + { + "type": "feature", + "category": "Amazon Security Lake", + "contributor": "", + "description": "Remove incorrect regex enforcement on pagination tokens." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces Web", + "contributor": "", + "description": "WorkSpaces Web now enables Admins to configure which cookies are synchronized from an end-user's local browser to the in-session browser. In conjunction with a browser extension, this feature enables enhanced Single-Sign On capability by reducing the number of times an end-user has to authenticate." + }, + { + "type": "feature", + "category": "Service Quotas", + "contributor": "", + "description": "Service Quotas now supports viewing the applied quota value and requesting a quota increase for a specific resource in an AWS account." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.20.137.json b/.changes/2.20.137.json new file mode 100644 index 00000000000..c28afa9ea9f --- /dev/null +++ b/.changes/2.20.137.json @@ -0,0 +1,30 @@ +{ + "version": "2.20.137", + "date": "2023-08-29", + "entries": [ + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "contributor": "", + "description": "Added API example requests and responses for several operations. Fixed the validation regex for user pools Identity Provider name." + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "Documentation updates for project quotas." + }, + { + "type": "feature", + "category": "Amazon Omics", + "contributor": "", + "description": "Add RetentionMode support for Runs." 
+ }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "contributor": "", + "description": "Adds support for the new Export and Message Insights features: create, get, list and cancel export jobs; get message insights." + } + ] +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e409994228..1917a957f70 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,270 @@ +# __2.20.137__ __2023-08-29__ +## __Amazon Cognito Identity Provider__ + - ### Features + - Added API example requests and responses for several operations. Fixed the validation regex for user pools Identity Provider name. + +## __Amazon FSx__ + - ### Features + - Documentation updates for project quotas. + +## __Amazon Omics__ + - ### Features + - Add RetentionMode support for Runs. + +## __Amazon Simple Email Service__ + - ### Features + - Adds support for the new Export and Message Insights features: create, get, list and cancel export jobs; get message insights. + +# __2.20.136__ __2023-08-28__ +## __AWS Backup__ + - ### Features + - Add support for customizing time zone for backup window in backup plan rules. + +## __AWS Compute Optimizer__ + - ### Features + - This release enables AWS Compute Optimizer to analyze and generate licensing optimization recommendations for sql server running on EC2 instances. + +## __AWS Organizations__ + - ### Features + - Documentation updates for permissions and links. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Security Lake__ + - ### Features + - Remove incorrect regex enforcement on pagination tokens. + +## __Amazon WorkSpaces Web__ + - ### Features + - WorkSpaces Web now enables Admins to configure which cookies are synchronized from an end-user's local browser to the in-session browser. In conjunction with a browser extension, this feature enables enhanced Single-Sign On capability by reducing the number of times an end-user has to authenticate. 
+ +## __Service Quotas__ + - ### Features + - Service Quotas now supports viewing the applied quota value and requesting a quota increase for a specific resource in an AWS account. + +# __2.20.135__ __2023-08-25__ +## __AWS CloudTrail__ + - ### Features + - Add ThrottlingException with error code 429 to handle CloudTrail Delegated Admin request rate exceeded on organization resources. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon CloudWatch__ + - ### Features + - Doc-only update to get doc bug fixes into the SDK docs + +## __Amazon Detective__ + - ### Features + - Added protections to interacting with fields containing customer information. + +# __2.20.134__ __2023-08-24__ +## __AWS Elemental MediaConvert__ + - ### Features + - This release includes additional audio channel tags in Quicktime outputs, support for film grain synthesis for AV1 outputs, ability to create audio-only FLAC outputs, and ability to specify Amazon S3 destination storage class. + +## __AWS Elemental MediaLive__ + - ### Features + - MediaLive now supports passthrough of KLV data to a HLS output group with a TS container. MediaLive now supports setting an attenuation mode for AC3 audio when the coding mode is 3/2 LFE. MediaLive now supports specifying whether to include filler NAL units in RTMP output group settings. + +## __AWS Glue__ + - ### Features + - Added API attributes that help in the monitoring of sessions. + +## __AWS MediaTailor__ + - ### Features + - Adds new source location AUTODETECT_SIGV4 access type. + +## __AWS S3 Control__ + - ### Features + - Updates to endpoint ruleset tests to address Smithy validation issues and standardize the capitalization of DualStack. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __Amazon Elastic Compute Cloud__ + - ### Features + - Amazon EC2 M7a instances, powered by 4th generation AMD EPYC processors, deliver up to 50% higher performance compared to M6a instances. Amazon EC2 Hpc7a instances, powered by 4th Gen AMD EPYC processors, deliver up to 2.5x better performance compared to Amazon EC2 Hpc6a instances. + +## __Amazon QuickSight__ + - ### Features + - Excel support in Snapshot Export APIs. Removed Required trait for some insight Computations. Namespace-shared Folders support. Global Filters support. Table pin Column support. + +## __Amazon Relational Database Service__ + - ### Features + - This release updates the supported versions for Percona XtraBackup in Aurora MySQL. + +## __Amazon S3__ + - ### Features + - Add support for presigned `DeleteObject` in `S3Presigner`. + +## __Amazon Simple Storage Service__ + - ### Features + - Updates to endpoint ruleset tests to address Smithy validation issues. + +## __Amazon Verified Permissions__ + - ### Features + - Documentation updates for Amazon Verified Permissions. + +# __2.20.133__ __2023-08-23__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon API Gateway__ + - ### Features + - This release adds RootResourceId to GetRestApi response. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Marking fields as sensitive on BundleTask and GetPasswordData + +## __Amazon Polly__ + - ### Features + - Amazon Polly adds 1 new voice - Zayd (ar-AE) + +# __2.20.132__ __2023-08-22__ +## __AWS Cost Explorer Service__ + - ### Features + - This release adds the LastUpdatedDate and LastUsedDate timestamps to help you manage your cost allocation tags. + +## __AWS Global Accelerator__ + - ### Features + - Global Accelerator now supports Client Ip Preservation for Network Load Balancer endpoints. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __AWS SDK for Java v2 - DynamoDb Enhanced__ + - ### Bugfixes + - Changes the default behavior of the DynamoDb Enhanced atomic counter extension to automatically filter out any counter attributes in the item to be updated. This allows users to read and update items without DynamoDb collision errors. + +## __Amazon Relational Database Service__ + - ### Features + - Adding parameters to CreateCustomDbEngineVersion reserved for future use. + +## __Amazon Verified Permissions__ + - ### Features + - Documentation updates for Amazon Verified Permissions. Increases max results per page for ListPolicyStores, ListPolicies, and ListPolicyTemplates APIs from 20 to 50. + +# __2.20.131__ __2023-08-21__ +## __AWS Cloud9__ + - ### Features + - Doc only update to add Ubuntu 22.04 as an Image ID option for Cloud9 + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - The DeleteKeyPair API has been updated to return the keyPairId when an existing key pair is deleted. + +## __Amazon Relational Database Service__ + - ### Features + - Adding support for RDS Aurora Global Database Unplanned Failover + +## __Amazon Route 53 Domains__ + - ### Features + - Fixed typos in description fields + +## __FinSpace User Environment Management service__ + - ### Features + - Allow customers to manage outbound traffic from their Kx Environment when attaching a transit gateway by providing network acl entries. Allow the customer to choose how they want to update the databases on a cluster allowing updates to possibly be faster than usual. + +# __2.20.130__ __2023-08-18__ +## __AWS CodeCommit__ + - ### Features + - Add new ListFileCommitHistory operation to retrieve commits which introduced changes to a specific file. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __AWS SecurityHub__ + - ### Features + - Added Inspector Lambda code Vulnerability section to ASFF, including GeneratorDetails, EpssScore, ExploitAvailable, and CodeVulnerabilities. + +# __2.20.129__ __2023-08-17__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Sends final checksum chunk and trailer when only onComplete() is called by upstream (empty content) + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Adds support for SubnetConfigurations to allow users to select their own IPv4 and IPv6 addresses for Interface VPC endpoints + +## __Amazon GameLift__ + - ### Features + - Amazon GameLift updates its instance types support. + +# __2.20.128__ __2023-08-16__ +## __AWS SDK for Java v2__ + - ### Bugfixes + - Add support for the `SERVICE_ENDPOINT` metric. This metric represents the endpoint (scheme and authority) that the request was sent to. + +## __Amazon CloudWatch__ + - ### Features + - Doc-only update to incorporate several doc bug fixes + +## __Amazon Lex Model Building V2__ + - ### Features + - This release updates the Custom Vocabulary Weight field to support a value of 0. + +# __2.20.127__ __2023-08-15__ +## __AWS Glue__ + - ### Features + - AWS Glue Crawlers can now accept SerDe overrides from a custom csv classifier. The two SerDe options are LazySimpleSerDe and OpenCSVSerDe. In case, the user wants crawler to do the selection, "None" can be selected for this purpose. + +## __AWS Performance Insights__ + - ### Features + - AWS Performance Insights for Amazon RDS is launching Performance Analysis On Demand, a new feature that allows you to analyze database performance metrics and find out the performance issues. You can now use SDK to create, list, get, delete, and manage tags of performance analysis reports. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __Amazon Chime SDK Meetings__ + - ### Features + - Updated API documentation to include additional exceptions. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Documentation updates for Elastic Compute Cloud (EC2). + +## __Amazon Route 53 Domains__ + - ### Features + - Provide explanation if CheckDomainTransferability return false. Provide requestId if a request is already submitted. Add sensitive protection for customer information + +## __Amazon SageMaker Service__ + - ### Features + - SageMaker Inference Recommender now provides SupportedResponseMIMETypes from DescribeInferenceRecommendationsJob response + +# __2.20.126__ __2023-08-14__ +## __AWS Elemental MediaPackage__ + - ### Features + - Fix SDK logging of certain fields. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS Transfer Family__ + - ### Features + - Documentation updates for AWS Transfer Family + +## __Amazon Omics__ + - ### Features + - This release provides support for annotation store versioning and cross account sharing for Omics Analytics + # __2.20.125__ __2023-08-11__ ## __AWS Config__ - ### Features @@ -1184,7 +1451,7 @@ Special thanks to the following contributors to this release: ## __Contributors__ Special thanks to the following contributors to this release: -[@breader124](https://github.com/breader124), [@bmaizels](https://github.com/bmaizels) +[@bmaizels](https://github.com/bmaizels), [@breader124](https://github.com/breader124) # __2.20.85__ __2023-06-13__ ## __AWS CloudTrail__ - ### Features diff --git a/README.md b/README.md index e5589f0ea26..e2c817c9978 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Gitter](https://badges.gitter.im/aws/aws-sdk-java-v2.svg)](https://gitter.im/aws/aws-sdk-java-v2?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![codecov](https://codecov.io/gh/aws/aws-sdk-java-v2/branch/master/graph/badge.svg)](https://codecov.io/gh/aws/aws-sdk-java-v2) -[![All 
Contributors](https://img.shields.io/badge/all_contributors-93-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-95-orange.svg?style=flat-square)](#contributors-) The **AWS SDK for Java 2.0** is a rewrite of 1.0 with some great new features. As with version 1.0, @@ -52,7 +52,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.20.125 + 2.20.137 pom import @@ -86,12 +86,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.20.125 + 2.20.137 software.amazon.awssdk s3 - 2.20.125 + 2.20.137 ``` @@ -103,7 +103,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.20.125 + 2.20.137 ``` @@ -311,6 +311,8 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d Olivier L Applin
    Olivier L Applin

    💻 Adrian Chlebosz
    Adrian Chlebosz

    💻 + Chad Wilson
    Chad Wilson

    💻 + Manish Dait
    Manish Dait

    📖 diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index 3f1e05ce2f6..44739a2c1eb 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index 2f26b4591b6..1f7a91fb89d 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index ba43c242541..e089bbc79c2 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/archetypes/pom.xml b/archetypes/pom.xml index 8948948ed7d..d9902c87af3 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index 9330b8937e7..99d41e2180d 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../pom.xml aws-sdk-java diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index 4cc8aebcb6e..56e1251f54b 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/bom/pom.xml b/bom/pom.xml index a8f66acbe38..78f20333086 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../pom.xml bom @@ -167,6 +167,11 @@ 
aws-crt-client ${awsjavasdk.version} + + software.amazon.awssdk + auth-crt + ${awsjavasdk.version} + software.amazon.awssdk iam-policy-builder diff --git a/bundle/pom.xml b/bundle/pom.xml index 4f39da56f12..ee15a948ecf 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT bundle jar diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 98612d5457c..dd82e06bd65 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index e2bc275177f..c5f86eaaf98 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-maven-plugin/pom.xml b/codegen-maven-plugin/pom.xml index 7ac847882e3..6a132cba03d 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../pom.xml codegen-maven-plugin diff --git a/codegen/pom.xml b/codegen/pom.xml index f0c2a0650b4..110244b0f99 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codegen AWS Java SDK :: Code Generator diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java b/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java index 66df8fe572b..74374b40cd3 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/IntermediateModelBuilder.java @@ -253,14 
+253,16 @@ private void setSimpleMethods(IntermediateModel model) { } else { inputShape.setSimpleMethod(false); - boolean methodIsNotBlacklisted = !config.getBlacklistedSimpleMethods().contains(methodName) || - config.getBlacklistedSimpleMethods().stream().noneMatch(m -> m.equals("*")); + boolean methodIsNotExcluded = !config.getExcludedSimpleMethods().contains(methodName) || + config.getExcludedSimpleMethods().stream().noneMatch(m -> m.equals("*")) || + !config.getBlacklistedSimpleMethods().contains(methodName) || + config.getBlacklistedSimpleMethods().stream().noneMatch(m -> m.equals("*")); boolean methodHasNoRequiredMembers = !CollectionUtils.isNullOrEmpty(inputShape.getRequired()); boolean methodIsNotStreaming = !operation.isStreaming(); boolean methodHasSimpleMethodVerb = methodName.matches(Constant.APPROVED_SIMPLE_METHOD_VERBS); - if (methodIsNotBlacklisted && methodHasNoRequiredMembers && methodIsNotStreaming && methodHasSimpleMethodVerb) { - log.warn("A potential simple method exists that isn't whitelisted or blacklisted: " + methodName); + if (methodIsNotExcluded && methodHasNoRequiredMembers && methodIsNotStreaming && methodHasSimpleMethodVerb) { + log.warn("A potential simple method exists that isn't explicitly excluded or included: " + methodName); } } }); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java b/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java index 65dc3346fdd..d78e37b6c3b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/docs/OperationDocProvider.java @@ -18,6 +18,7 @@ import static software.amazon.awssdk.codegen.internal.DocumentationUtils.createLinkToServiceDocumentation; import static software.amazon.awssdk.codegen.internal.DocumentationUtils.stripHtmlTags; +import com.squareup.javapoet.ClassName; import java.util.Collections; import java.util.List; 
import java.util.stream.Collectors; @@ -171,10 +172,13 @@ final List> getThrows() { final void emitRequestParm(DocumentationBuilder docBuilder) { String parameterDocs = stripHtmlTags(opModel.getInput().getDocumentation()); + String shapeName = opModel.getInputShape().getShapeName(); + ClassName fcqn = ClassName.get(model.getMetadata().getFullModelPackageName(), shapeName); + if (config.isConsumerBuilder()) { docBuilder.param(opModel.getInput().getVariableName(), "A {@link Consumer} that will call methods on {@link %s.Builder} to create a request. %s", - opModel.getInputShape().getC2jName(), + fcqn.toString(), parameterDocs); } else { docBuilder.param(opModel.getInput().getVariableName(), parameterDocs); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java index 0bef67df786..f54d5db2332 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java @@ -103,6 +103,13 @@ public class CustomizationConfig { /** * APIs that have no required arguments in their model but can't be called via a simple method */ + private List excludedSimpleMethods = new ArrayList<>(); + + /** + * APIs that have no required arguments in their model but can't be called via a simple method. 
+ * Superseded by {@link #excludedSimpleMethods} + */ + @Deprecated private List blacklistedSimpleMethods = new ArrayList<>(); /** @@ -373,10 +380,26 @@ public void setServiceSpecificHttpConfig(String serviceSpecificHttpConfig) { this.serviceSpecificHttpConfig = serviceSpecificHttpConfig; } + public List getExcludedSimpleMethods() { + return excludedSimpleMethods; + } + + public void setExcludedSimpleMethods(List excludedSimpleMethods) { + this.excludedSimpleMethods = excludedSimpleMethods; + } + + /** + * Use {@link #getExcludedSimpleMethods()} + */ + @Deprecated public List getBlacklistedSimpleMethods() { return blacklistedSimpleMethods; } + /** + * Use {@link #setExcludedSimpleMethods(List)} + */ + @Deprecated public void setBlacklistedSimpleMethods(List blackListedSimpleMethods) { this.blacklistedSimpleMethods = blackListedSimpleMethods; } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config index af7f5cb4564..a54af7a0a2c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json/customization.config @@ -11,7 +11,7 @@ }, "customRetryPolicy": "software.amazon.MyServiceRetryPolicy", "verifiedSimpleMethods" : ["paginatedOperationWithResultKey"], - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "eventStreamOperation" ], "utilitiesMethod": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config index e13cec317ff..32b15896ce5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rest-json/customization.config @@ -15,7 +15,7 @@ }, "customRetryPolicy": "software.amazon.MyServiceRetryPolicy", "verifiedSimpleMethods" : ["paginatedOperationWithResultKey"], - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "eventStreamOperation" ], "utilitiesMethod": { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java index 7beab5fe6ab..76469501f82 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-interface.java @@ -117,7 +117,8 @@ default CompletableFuture aPostOperation(APostOperationR *

    * * @param aPostOperationRequest - * A {@link Consumer} that will call methods on {@link APostOperationRequest.Builder} to create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.APostOperationRequest.Builder} to create a request. * @return A Java Future containing the result of the APostOperation operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. @@ -181,7 +182,8 @@ default CompletableFuture aPostOperationWithOu *

    * * @param aPostOperationWithOutputRequest - * A {@link Consumer} that will call methods on {@link APostOperationWithOutputRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest.Builder} to create a * request. * @return A Java Future containing the result of the APostOperationWithOutput operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -238,8 +240,8 @@ default CompletableFuture bearerAuthOperation( *

    * * @param bearerAuthOperationRequest - * A {@link Consumer} that will call methods on {@link BearerAuthOperationRequest.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.BearerAuthOperationRequest.Builder} to create a request. * @return A Java Future containing the result of the BearerAuthOperation operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. @@ -292,7 +294,8 @@ default CompletableFuture eventStreamOperation(EventStreamOperationRequest *

    * * @param eventStreamOperationRequest - * A {@link Consumer} that will call methods on {@link EventStreamOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.EventStreamOperationRequest.Builder} to create a * request. * @return A Java Future containing the result of the EventStreamOperation operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -350,7 +353,8 @@ default CompletableFuture eventStream *

    * * @param eventStreamOperationWithOnlyInputRequest - * A {@link Consumer} that will call methods on {@link EventStreamOperationWithOnlyInputRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.EventStreamOperationWithOnlyInputRequest.Builder} to * create a request. * @return A Java Future containing the result of the EventStreamOperationWithOnlyInput operation returned by the * service.
    @@ -410,7 +414,8 @@ default CompletableFuture eventStreamOperationWithOnlyOutput( *

    * * @param eventStreamOperationWithOnlyOutputRequest - * A {@link Consumer} that will call methods on {@link EventStreamOperationWithOnlyOutputRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.EventStreamOperationWithOnlyOutputRequest.Builder} to * create a request. * @return A Java Future containing the result of the EventStreamOperationWithOnlyOutput operation returned by the * service.
    @@ -468,7 +473,9 @@ default CompletableFuture getOperationWithChec *

    * * @param getOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructure.Builder} to create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.GetOperationWithChecksumRequest.Builder} to create a + * request. * @return A Java Future containing the result of the GetOperationWithChecksum operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. @@ -529,7 +536,8 @@ default CompletableFuture getWithoutRequiredM *

    * * @param getWithoutRequiredMembersRequest - * A {@link Consumer} that will call methods on {@link GetWithoutRequiredMembersRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetWithoutRequiredMembers operation returned by the service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -587,8 +595,9 @@ default CompletableFuture operationWithCh *

    * * @param operationWithChecksumRequiredRequest - * A {@link Consumer} that will call methods on {@link OperationWithChecksumRequiredRequest.Builder} to - * create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest.Builder} to create + * a request. * @return A Java Future containing the result of the OperationWithChecksumRequired operation returned by the * service.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -701,7 +710,8 @@ default CompletableFuture paginatedOper *

    * * @param paginatedOperationWithResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest.Builder} to * create a request. * @return A Java Future containing the result of the PaginatedOperationWithResultKey operation returned by the * service.
    @@ -961,7 +971,8 @@ default PaginatedOperationWithResultKeyPublisher paginatedOperationWithResultKey *

    * * @param paginatedOperationWithResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest.Builder} to * create a request. * @return A custom publisher that can be subscribed to request a stream of response pages.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -1017,7 +1028,8 @@ default CompletableFuture paginatedO *

    * * @param paginatedOperationWithoutResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithoutResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest.Builder} to * create a request. * @return A Java Future containing the result of the PaginatedOperationWithoutResultKey operation returned by the * service.
    @@ -1179,7 +1191,8 @@ default PaginatedOperationWithoutResultKeyPublisher paginatedOperationWithoutRes *

    * * @param paginatedOperationWithoutResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithoutResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest.Builder} to * create a request. * @return A custom publisher that can be subscribed to request a stream of response pages.
    * The CompletableFuture returned by this method can be completed exceptionally with the following @@ -1253,7 +1266,8 @@ default CompletableFuture putOperationWithChecksum( *

    * * @param putOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructureWithStreaming.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PutOperationWithChecksumRequest.Builder} to create a * request. * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The @@ -1345,7 +1359,8 @@ default CompletableFuture putOperationWithChec *

    * * @param putOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructureWithStreaming.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PutOperationWithChecksumRequest.Builder} to create a * request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read @@ -1423,7 +1438,8 @@ default CompletableFuture streamingInputOperati *

    * * @param streamingInputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOperationRequest.Builder} to create a * request. * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The @@ -1488,7 +1504,8 @@ default CompletableFuture streamingInputOperati *

    * * @param streamingInputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOperationRequest.Builder} to create a * request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read @@ -1559,8 +1576,9 @@ default CompletableFuture streamingInputOutputOperation( *

    * * @param streamingInputOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOutputOperationRequest.Builder} to create + * a request. * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The * size of the content is expected to be known up front. See {@link AsyncRequestBody} for specific details on @@ -1637,8 +1655,9 @@ default CompletableFuture streamingInputO *

    * * @param streamingInputOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOutputOperationRequest.Builder} to create + * a request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read * multiple times in the event of a retry. If the file does not exist or the current user does not have @@ -1709,7 +1728,8 @@ default CompletableFuture streamingOutputOperation( *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @param asyncResponseTransformer * The response transformer for processing the streaming response in a non-blocking manner. See @@ -1774,7 +1794,8 @@ default CompletableFuture streamingOutputOpera *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @param destinationPath * {@link Path} to file that response contents will be written to. The file must not exist or this method diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java index e86ed6eee73..c0bd88e15d6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-interface.java @@ -100,7 +100,8 @@ default APostOperationResponse aPostOperation(APostOperationRequest aPostOperati *

    * * @param aPostOperationRequest - * A {@link Consumer} that will call methods on {@link APostOperationRequest.Builder} to create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.APostOperationRequest.Builder} to create a request. * @return Result of the APostOperation operation returned by the service. * @throws InvalidInputException * The request was rejected because an invalid or out-of-range value was supplied for an input parameter. @@ -160,7 +161,8 @@ default APostOperationWithOutputResponse aPostOperationWithOutput( *

    * * @param aPostOperationWithOutputRequest - * A {@link Consumer} that will call methods on {@link APostOperationWithOutputRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest.Builder} to create a * request. * @return Result of the APostOperationWithOutput operation returned by the service. * @throws InvalidInputException @@ -212,8 +214,8 @@ default BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReque *

    * * @param bearerAuthOperationRequest - * A {@link Consumer} that will call methods on {@link BearerAuthOperationRequest.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.BearerAuthOperationRequest.Builder} to create a request. * @return Result of the BearerAuthOperation operation returned by the service. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for @@ -262,7 +264,9 @@ default GetOperationWithChecksumResponse getOperationWithChecksum( *

    * * @param getOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructure.Builder} to create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.GetOperationWithChecksumRequest.Builder} to create a + * request. * @return Result of the GetOperationWithChecksum operation returned by the service. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for @@ -319,7 +323,8 @@ default GetWithoutRequiredMembersResponse getWithoutRequiredMembers( *

    * * @param getWithoutRequiredMembersRequest - * A {@link Consumer} that will call methods on {@link GetWithoutRequiredMembersRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.GetWithoutRequiredMembersRequest.Builder} to create a * request. * @return Result of the GetWithoutRequiredMembers operation returned by the service. * @throws InvalidInputException @@ -372,8 +377,9 @@ default OperationWithChecksumRequiredResponse operationWithChecksumRequired( *

    * * @param operationWithChecksumRequiredRequest - * A {@link Consumer} that will call methods on {@link OperationWithChecksumRequiredRequest.Builder} to - * create a request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.OperationWithChecksumRequiredRequest.Builder} to create + * a request. * @return Result of the OperationWithChecksumRequired operation returned by the service. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for @@ -474,7 +480,8 @@ default PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( *

    * * @param paginatedOperationWithResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest.Builder} to * create a request. * @return Result of the PaginatedOperationWithResultKey operation returned by the service. * @throws SdkException @@ -732,7 +739,8 @@ default PaginatedOperationWithResultKeyIterable paginatedOperationWithResultKeyP *

    * * @param paginatedOperationWithResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithResultKeyRequest.Builder} to * create a request. * @return A custom iterable that can be used to iterate through all the response pages. * @throws SdkException @@ -783,7 +791,8 @@ default PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResu *

    * * @param paginatedOperationWithoutResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithoutResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest.Builder} to * create a request. * @return Result of the PaginatedOperationWithoutResultKey operation returned by the service. * @throws SdkException @@ -944,7 +953,8 @@ default PaginatedOperationWithoutResultKeyIterable paginatedOperationWithoutResu *

    * * @param paginatedOperationWithoutResultKeyRequest - * A {@link Consumer} that will call methods on {@link PaginatedOperationWithoutResultKeyRequest.Builder} to + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PaginatedOperationWithoutResultKeyRequest.Builder} to * create a request. * @return A custom iterable that can be used to iterate through all the response pages. * @throws SdkException @@ -1021,7 +1031,8 @@ default ReturnT putOperationWithChecksum(PutOperationWithChecksumReque *

    * * @param putOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructureWithStreaming.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PutOperationWithChecksumRequest.Builder} to create a * request. * @param requestBody * The content to send to the service. A {@link RequestBody} can be created using one of several factory @@ -1119,7 +1130,8 @@ default PutOperationWithChecksumResponse putOperationWithChecksum( *

    * * @param putOperationWithChecksumRequest - * A {@link Consumer} that will call methods on {@link ChecksumStructureWithStreaming.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.PutOperationWithChecksumRequest.Builder} to create a * request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read @@ -1200,7 +1212,8 @@ default StreamingInputOperationResponse streamingInputOperation( *

    * * @param streamingInputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOperationRequest.Builder} to create a * request. * @param requestBody * The content to send to the service. A {@link RequestBody} can be created using one of several factory @@ -1268,7 +1281,8 @@ default StreamingInputOperationResponse streamingInputOperation( *

    * * @param streamingInputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOperationRequest.Builder} to create a * request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read @@ -1344,8 +1358,9 @@ default ReturnT streamingInputOutputOperation( *

    * * @param streamingInputOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOutputOperationRequest.Builder} to create + * a request. * @param requestBody * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the @@ -1427,8 +1442,9 @@ default StreamingInputOutputOperationResponse streamingInputOutputOperation( *

    * * @param streamingInputOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StructureWithStreamingMember.Builder} to create a - * request. + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingInputOutputOperationRequest.Builder} to create + * a request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read * multiple times in the event of a retry. If the file does not exist or the current user does not have @@ -1497,7 +1513,8 @@ default ReturnT streamingOutputOperation(StreamingOutputOperationReque *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @param responseTransformer * Functional interface for processing the streamed response content. The unmarshalled @@ -1561,7 +1578,8 @@ default StreamingOutputOperationResponse streamingOutputOperation( *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @param destinationPath * {@link Path} to file that response contents will be written to. The file must not exist or this method @@ -1623,7 +1641,8 @@ default ResponseInputStream streamingOutputOpe *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @return A {@link ResponseInputStream} containing data streamed from service. Note that this is an unmanaged * reference to the underlying HTTP connection so great care must be taken to ensure all data if fully read @@ -1684,7 +1703,8 @@ default ResponseBytes streamingOutputOperation *

    * * @param streamingOutputOperationRequest - * A {@link Consumer} that will call methods on {@link StreamingOutputOperationRequest.Builder} to create a + * A {@link Consumer} that will call methods on + * {@link software.amazon.awssdk.services.json.model.StreamingOutputOperationRequest.Builder} to create a * request. * @return A {@link ResponseBytes} that loads the data streamed from the service into memory and exposes it in * convenient in-memory representations like a byte buffer or string. The unmarshalled response object can diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config index e1206b3fd91..213183ce5a8 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput", diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config index d3b7f5d49d4..73d3ee1594f 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/customresponsemetadata/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput", diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/customization.config index 
5e44eaa81f8..ced0f694b18 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput", diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customization.config index 78f3be1bf76..2f34e03ed04 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/paginators/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput" diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/transform/customization.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/transform/customization.config index b57099f2d79..e66eb09003a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/transform/customization.config +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/transform/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "allTypes", "nestedContainers", "operationWithNoInputOrOutput", diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index 38e32d10232..cb8f532114e 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/arns/pom.xml b/core/arns/pom.xml index 802774018d6..7ae92e2159c 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core 
software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/auth-crt/pom.xml b/core/auth-crt/pom.xml index 91dc602039b..cd8bc6ca393 100644 --- a/core/auth-crt/pom.xml +++ b/core/auth-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT auth-crt diff --git a/core/auth/pom.xml b/core/auth/pom.xml index 2e8581589b6..2e7c957fc71 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT auth diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index f1911753386..9c722f320e7 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT aws-core diff --git a/core/crt-core/pom.xml b/core/crt-core/pom.xml index 626813252a3..c7bb6aabba6 100644 --- a/core/crt-core/pom.xml +++ b/core/crt-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT crt-core diff --git a/core/endpoints-spi/pom.xml b/core/endpoints-spi/pom.xml index d45d7b52509..a5cd1423396 100644 --- a/core/endpoints-spi/pom.xml +++ b/core/endpoints-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/imds/pom.xml b/core/imds/pom.xml index 3ec164ba79d..06cb42b9770 100644 --- a/core/imds/pom.xml +++ b/core/imds/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 imds diff --git a/core/json-utils/pom.xml b/core/json-utils/pom.xml index e262f9de0a9..37310ab2d92 100644 --- a/core/json-utils/pom.xml +++ b/core/json-utils/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index 2378f2e38ee..8f6a7258ccd 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core 
software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index 342f3cf9395..3c49cb0a2cc 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index 71fed9dd446..724db091382 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT profiles diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index 5b36c168871..328333de4a6 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index e934c54da81..ccc8fb36210 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index 84195df0201..4513147fabd 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index c1b5b3ef582..ed9386a5de6 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index f535b05c9b5..84992a057df 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml 
@@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index e29093bd76f..0f7fdd3c527 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index a87ad3caaba..6cd9a869c6b 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT regions diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index a686c926e99..c29cd48dfc2 100644 --- a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -375,6 +375,7 @@ "deprecated" : true, "hostname" : "acm-pca-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -521,6 +522,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -1066,6 +1068,7 @@ }, "endpoints" : { "ap-south-1" : { }, + "eu-central-1" : { }, "us-east-1" : { } } }, @@ -1610,6 +1613,12 @@ "tags" : [ "dualstack" ] } ] }, + "il-central-1" : { + "variants" : [ { + "hostname" : "appmesh.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "me-south-1" : { "variants" : [ { "hostname" : "appmesh.me-south-1.api.aws", @@ -2035,6 +2044,12 @@ "deprecated" : true, "hostname" : "athena-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { + "variants" : [ { + "hostname" : "athena.il-central-1.api.aws", + "tags" : [ "dualstack" ] 
+ } ] + }, "me-central-1" : { "variants" : [ { "hostname" : "athena.me-central-1.api.aws", @@ -2205,6 +2220,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2326,6 +2342,7 @@ "deprecated" : true, "hostname" : "fips.batch.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -4400,6 +4417,7 @@ "deprecated" : true, "hostname" : "datasync-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -6209,6 +6227,7 @@ "deprecated" : true, "hostname" : "email-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -6922,6 +6941,7 @@ "deprecated" : true, "hostname" : "fms-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { "variants" : [ { @@ -7168,6 +7188,7 @@ "deprecated" : true, "hostname" : "fsx-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "prod-ca-central-1" : { @@ -7673,6 +7694,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -8019,12 +8041,60 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : 
"inspector2-fips.us-west-2.amazonaws.com" + }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "internetmonitor" : { @@ -9894,6 +9964,7 @@ "deprecated" : true, "hostname" : "license-manager-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -10442,9 +10513,11 @@ "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "mediaconvert-fips.ca-central-1.amazonaws.com", @@ -10853,6 +10926,7 @@ "deprecated" : true, "hostname" : "mgn-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -11349,7 +11423,18 @@ "credentialScope" : { "region" : "us-west-2" }, - "hostname" : "networkmanager.us-west-2.amazonaws.com" + "hostname" : "networkmanager.us-west-2.amazonaws.com", + "variants" : [ { + "hostname" : "networkmanager-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-aws-global" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "networkmanager-fips.us-west-2.amazonaws.com" } }, "isRegionalized" : false, @@ -11727,6 +11812,8 @@ "deprecated" : true, "hostname" : "outposts-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { 
}, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -12880,6 +12967,7 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "il-central-1" : { }, "rekognition-fips.ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -14061,6 +14149,7 @@ "fips-us-west-2" : { "deprecated" : true }, + "il-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -14135,18 +14224,26 @@ }, "schemas" : { "endpoints" : { + "af-south-1" : { }, "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-3" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-central-1" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -14855,6 +14952,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -15945,6 +16043,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -16890,7 +16989,10 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -19142,6 +19244,12 @@ }, "isRegionalized" : true }, + "schemas" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "secretsmanager" : { "endpoints" : { "cn-north-1" : { }, @@ -21574,8 +21682,32 @@ }, "inspector2" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : 
"inspector2-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "inspector2-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "inspector2-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "internetmonitor" : { @@ -22281,6 +22413,17 @@ "credentialScope" : { "region" : "us-gov-west-1" }, + "hostname" : "networkmanager.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : "networkmanager.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "fips-aws-us-gov-global" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, "hostname" : "networkmanager.us-gov-west-1.amazonaws.com" } }, @@ -23065,13 +23208,13 @@ }, "us-gov-east-1" : { "variants" : [ { - "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", - "tags" : [ "dualstack", "fips" ] - }, { "hostname" : "servicediscovery-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-gov-east-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-gov-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -23084,13 +23227,13 @@ }, "us-gov-west-1" : { "variants" : [ { - "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", - "tags" : [ "dualstack", "fips" ] - }, { "hostname" : "servicediscovery-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] }, { - "hostname" : "servicediscovery.us-gov-west-1.amazonaws.com", + "hostname" : "servicediscovery-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "servicediscovery.us-gov-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ 
-24055,7 +24198,8 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { - "us-iso-east-1" : { } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "elasticache" : { @@ -24293,6 +24437,12 @@ "us-iso-west-1" : { } } }, + "resource-groups" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "route53" : { "endpoints" : { "aws-iso-global" : { @@ -24737,6 +24887,11 @@ "us-isob-east-1" : { } } }, + "outposts" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "ram" : { "endpoints" : { "us-isob-east-1" : { } diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index 5deffe013c6..a878dcfc61f 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sdk-core AWS Java SDK :: SDK Core diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/FileRequestBodyConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/FileRequestBodyConfiguration.java new file mode 100644 index 00000000000..07e7a98e424 --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/FileRequestBodyConfiguration.java @@ -0,0 +1,209 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core; + +import java.nio.file.Path; +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * Configuration options for {@link AsyncRequestBody#fromFile(FileRequestBodyConfiguration)} to configure how the SDK + * should read the file. + * + * @see #builder() + */ +@SdkPublicApi +public final class FileRequestBodyConfiguration implements ToCopyableBuilder { + private final Integer chunkSizeInBytes; + private final Long position; + private final Long numBytesToRead; + private final Path path; + + private FileRequestBodyConfiguration(DefaultBuilder builder) { + this.path = Validate.notNull(builder.path, "path"); + this.chunkSizeInBytes = Validate.isPositiveOrNull(builder.chunkSizeInBytes, "chunkSizeInBytes"); + this.position = Validate.isNotNegativeOrNull(builder.position, "position"); + this.numBytesToRead = Validate.isNotNegativeOrNull(builder.numBytesToRead, "numBytesToRead"); + } + + /** + * Create a {@link Builder}, used to create a {@link FileRequestBodyConfiguration}. + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * @return the size of each chunk to read from the file + */ + public Integer chunkSizeInBytes() { + return chunkSizeInBytes; + } + + /** + * @return the file position at which the request body begins. + */ + public Long position() { + return position; + } + + /** + * @return the number of bytes to read from this file. 
+ */ + public Long numBytesToRead() { + return numBytesToRead; + } + + /** + * @return the file path + */ + public Path path() { + return path; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + FileRequestBodyConfiguration that = (FileRequestBodyConfiguration) o; + + if (!Objects.equals(chunkSizeInBytes, that.chunkSizeInBytes)) { + return false; + } + if (!Objects.equals(position, that.position)) { + return false; + } + if (!Objects.equals(numBytesToRead, that.numBytesToRead)) { + return false; + } + return Objects.equals(path, that.path); + } + + @Override + public int hashCode() { + int result = chunkSizeInBytes != null ? chunkSizeInBytes.hashCode() : 0; + result = 31 * result + (position != null ? position.hashCode() : 0); + result = 31 * result + (numBytesToRead != null ? numBytesToRead.hashCode() : 0); + result = 31 * result + (path != null ? path.hashCode() : 0); + return result; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + public interface Builder extends CopyableBuilder { + + /** + * Sets the {@link Path} to the file containing data to send to the service + * + * @param path Path to file to read. + * @return This builder for method chaining. + */ + Builder path(Path path); + + /** + * Sets the size of chunks read from the file. Increasing this will cause more data to be buffered into memory but + * may yield better latencies. Decreasing this will reduce memory usage but may cause reduced latency. Setting this value + * is very dependent on upload speed and requires some performance testing to tune. + * + *

    The default chunk size is 16 KiB

    + * + * @param chunkSize New chunk size in bytes. + * @return This builder for method chaining. + */ + Builder chunkSizeInBytes(Integer chunkSize); + + /** + * Sets the file position at which the request body begins. + * + *

    By default, it's 0, i.e., reading from the beginning. + * + * @param position the position of the file + * @return The builder for method chaining. + */ + Builder position(Long position); + + /** + * Sets the number of bytes to read from this file. + * + *

    By default, it's same as the file length. + * + * @param numBytesToRead number of bytes to read + * @return The builder for method chaining. + */ + Builder numBytesToRead(Long numBytesToRead); + } + + private static final class DefaultBuilder implements Builder { + private Long position; + private Path path; + private Integer chunkSizeInBytes; + private Long numBytesToRead; + + private DefaultBuilder(FileRequestBodyConfiguration configuration) { + this.position = configuration.position; + this.path = configuration.path; + this.chunkSizeInBytes = configuration.chunkSizeInBytes; + this.numBytesToRead = configuration.numBytesToRead; + } + + private DefaultBuilder() { + + } + + @Override + public Builder path(Path path) { + this.path = path; + return this; + } + + @Override + public Builder chunkSizeInBytes(Integer chunkSizeInBytes) { + this.chunkSizeInBytes = chunkSizeInBytes; + return this; + } + + @Override + public Builder position(Long position) { + this.position = position; + return this; + } + + @Override + public Builder numBytesToRead(Long numBytesToRead) { + this.numBytesToRead = numBytesToRead; + return this; + } + + @Override + public FileRequestBodyConfiguration build() { + return new FileRequestBodyConfiguration(this); + } + } + +} \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java index 4c7d70ab755..8fd0fb6d665 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java @@ -29,6 +29,7 @@ import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.core.FileRequestBodyConfiguration; import software.amazon.awssdk.core.internal.async.ByteBuffersAsyncRequestBody; import 
software.amazon.awssdk.core.internal.async.FileAsyncRequestBody; import software.amazon.awssdk.core.internal.async.InputStreamWithExecutorAsyncRequestBody; @@ -112,16 +113,46 @@ static AsyncRequestBody fromFile(Path path) { /** * Creates an {@link AsyncRequestBody} that produces data from the contents of a file. See - * {@link FileAsyncRequestBody#builder} to create a customized body implementation. + * {@link #fromFile(FileRequestBodyConfiguration)} to create a customized body implementation. * * @param file The file to read from. * @return Implementation of {@link AsyncRequestBody} that reads data from the specified file. - * @see FileAsyncRequestBody */ static AsyncRequestBody fromFile(File file) { return FileAsyncRequestBody.builder().path(file.toPath()).build(); } + /** + * Creates an {@link AsyncRequestBody} that produces data from the contents of a file. + * + * @param configuration configuration for how the SDK should read the file + * @return Implementation of {@link AsyncRequestBody} that reads data from the specified file. + */ + static AsyncRequestBody fromFile(FileRequestBodyConfiguration configuration) { + Validate.notNull(configuration, "configuration"); + return FileAsyncRequestBody.builder() + .path(configuration.path()) + .position(configuration.position()) + .chunkSizeInBytes(configuration.chunkSizeInBytes()) + .numBytesToRead(configuration.numBytesToRead()) + .build(); + } + + /** + * Creates an {@link AsyncRequestBody} that produces data from the contents of a file. + * + *

    + * This is a convenience method that creates an instance of the {@link FileRequestBodyConfiguration} builder, + * avoiding the need to create one manually via {@link FileRequestBodyConfiguration#builder()}. + * + * @param configuration configuration for how the SDK should read the file + * @return Implementation of {@link AsyncRequestBody} that reads data from the specified file. + */ + static AsyncRequestBody fromFile(Consumer configuration) { + Validate.notNull(configuration, "configuration"); + return fromFile(FileRequestBodyConfiguration.builder().applyMutation(configuration).build()); + } + /** * Creates an {@link AsyncRequestBody} that uses a single string as data. * @@ -410,22 +441,18 @@ static AsyncRequestBody empty() { * is 2MB and the default buffer size is 8MB. * *

    - * If content length of this {@link AsyncRequestBody} is present, each divided {@link AsyncRequestBody} is delivered to the - * subscriber right after it's initialized. - *

    - * If content length is null, it is sent after the entire content for that chunk is buffered. - * In this case, the configured {@code maxMemoryUsageInBytes} must be larger than or equal to {@code chunkSizeInBytes}. + * By default, if content length of this {@link AsyncRequestBody} is present, each divided {@link AsyncRequestBody} is + * delivered to the subscriber right after it's initialized. On the other hand, if content length is null, it is sent after + * the entire content for that chunk is buffered. In this case, the configured {@code maxMemoryUsageInBytes} must be larger + * than or equal to {@code chunkSizeInBytes}. Note that this behavior may be different if a specific implementation of this + * interface overrides this method. * * @see AsyncRequestBodySplitConfiguration */ default SdkPublisher split(AsyncRequestBodySplitConfiguration splitConfiguration) { Validate.notNull(splitConfiguration, "splitConfiguration"); - return SplittingPublisher.builder() - .asyncRequestBody(this) - .chunkSizeInBytes(splitConfiguration.chunkSizeInBytes()) - .bufferSizeInBytes(splitConfiguration.bufferSizeInBytes()) - .build(); + return new SplittingPublisher(this, splitConfiguration); } /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java index fe51f33b4ff..45596ab03ea 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBodySplitConfiguration.java @@ -28,6 +28,12 @@ @SdkPublicApi public final class AsyncRequestBodySplitConfiguration implements ToCopyableBuilder { + private static final long DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024L; + private static final long DEFAULT_BUFFER_SIZE = DEFAULT_CHUNK_SIZE * 4; + private static final AsyncRequestBodySplitConfiguration 
DEFAULT_CONFIG = builder() + .bufferSizeInBytes(DEFAULT_BUFFER_SIZE) + .chunkSizeInBytes(DEFAULT_CHUNK_SIZE) + .build(); private final Long chunkSizeInBytes; private final Long bufferSizeInBytes; @@ -36,6 +42,10 @@ private AsyncRequestBodySplitConfiguration(DefaultBuilder builder) { this.bufferSizeInBytes = Validate.isPositiveOrNull(builder.bufferSizeInBytes, "bufferSizeInBytes"); } + public static AsyncRequestBodySplitConfiguration defaultConfiguration() { + return DEFAULT_CONFIG; + } + /** * The configured chunk size for each divided {@link AsyncRequestBody}. */ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java index 146007927c6..fa5d475dd40 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBody.java @@ -28,11 +28,13 @@ import org.reactivestreams.Subscription; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.utils.BinaryUtils; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.async.DelegatingSubscriber; import software.amazon.awssdk.utils.builder.SdkBuilder; /** @@ -129,13 +131,12 @@ public ChecksumCalculatingAsyncRequestBody.Builder trailerHeader(String trailerH @Override public Optional contentLength() { - if (wrapped.contentLength().isPresent() && algorithm != null) { return Optional.of(calculateChunkLength(wrapped.contentLength().get()) - + 
calculateChecksumContentLength(algorithm, trailerHeader)); - } else { - return wrapped.contentLength(); + + LAST_CHUNK_LEN + + calculateChecksumTrailerLength(algorithm, trailerHeader)); } + return wrapped.contentLength(); } @Override @@ -149,12 +150,15 @@ public void subscribe(Subscriber s) { if (sdkChecksum != null) { sdkChecksum.reset(); } - SynchronousChunkBuffer synchronousChunkBuffer = new SynchronousChunkBuffer(totalBytes); - wrapped.flatMapIterable(synchronousChunkBuffer::buffer) + alwaysInvokeOnNext(wrapped).flatMapIterable(synchronousChunkBuffer::buffer) .subscribe(new ChecksumCalculatingSubscriber(s, sdkChecksum, trailerHeader, totalBytes)); } + private SdkPublisher alwaysInvokeOnNext(SdkPublisher source) { + return subscriber -> source.subscribe(new OnNextGuaranteedSubscriber(subscriber)); + } + private static final class ChecksumCalculatingSubscriber implements Subscriber { private final Subscriber wrapped; @@ -243,4 +247,30 @@ private Iterable buffer(ByteBuffer bytes) { } } + public static class OnNextGuaranteedSubscriber extends DelegatingSubscriber { + + private volatile boolean onNextInvoked; + + public OnNextGuaranteedSubscriber(Subscriber subscriber) { + super(subscriber); + } + + @Override + public void onNext(ByteBuffer t) { + if (!onNextInvoked) { + onNextInvoked = true; + } + + subscriber.onNext(t); + } + + @Override + public void onComplete() { + if (!onNextInvoked) { + subscriber.onNext(ByteBuffer.wrap(new byte[0])); + } + super.onComplete(); + } + } + } \ No newline at end of file diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java index bdf84d549b8..4e465a310c7 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ChunkBuffer.java @@ -25,6 +25,8 @@ import 
java.util.concurrent.atomic.AtomicLong; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.SdkBuilder; /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java index 8f7b2a48360..f8bbdd55208 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java @@ -33,9 +33,12 @@ import org.reactivestreams.Subscription; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; +import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.internal.util.Mimetype; import software.amazon.awssdk.core.internal.util.NoopSubscription; import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.NumericUtils; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.builder.SdkBuilder; @@ -65,16 +68,47 @@ public final class FileAsyncRequestBody implements AsyncRequestBody { * Size (in bytes) of ByteBuffer chunks read from the file and delivered to the subscriber. */ private final int chunkSizeInBytes; + private final long position; + private final long numBytesToRead; private FileAsyncRequestBody(DefaultBuilder builder) { this.path = builder.path; this.chunkSizeInBytes = builder.chunkSizeInBytes == null ? DEFAULT_CHUNK_SIZE : builder.chunkSizeInBytes; this.fileLength = invokeSafely(() -> Files.size(path)); + this.position = builder.position == null ? 
0 : Validate.isNotNegative(builder.position, "position"); + this.numBytesToRead = builder.numBytesToRead == null ? fileLength - this.position : + Validate.isNotNegative(builder.numBytesToRead, "numBytesToRead"); + } + + @Override + public SdkPublisher split(AsyncRequestBodySplitConfiguration splitConfiguration) { + Validate.notNull(splitConfiguration, "splitConfiguration"); + return new FileAsyncRequestBodySplitHelper(this, splitConfiguration).split(); + } + + public Path path() { + return path; + } + + public long fileLength() { + return fileLength; + } + + public int chunkSizeInBytes() { + return chunkSizeInBytes; + } + + public long position() { + return position; + } + + public long numBytesToRead() { + return numBytesToRead; } @Override public Optional contentLength() { - return Optional.of(fileLength); + return Optional.of(numBytesToRead); } @Override @@ -91,7 +125,7 @@ public void subscribe(Subscriber s) { // We need to synchronize here because the subscriber could call // request() from within onSubscribe which would potentially // trigger onNext before onSubscribe is finished. - Subscription subscription = new FileSubscription(path, channel, s, chunkSizeInBytes); + Subscription subscription = new FileSubscription(channel, s); synchronized (subscription) { s.onSubscribe(subscription); @@ -128,7 +162,7 @@ public interface Builder extends SdkBuilder { Builder path(Path path); /** - * Sets the size of chunks read from the file. Increasing this will cause more data to be buffered into memory but + * Sets the size of chunks to read from the file. Increasing this will cause more data to be buffered into memory but * may yield better latencies. Decreasing this will reduce memory usage but may cause reduced latency. Setting this value * is very dependent on upload speed and requires some performance testing to tune. 
* @@ -139,12 +173,33 @@ public interface Builder extends SdkBuilder { */ Builder chunkSizeInBytes(Integer chunkSize); + /** + * Sets the file position at which the request body begins. + * + *

    By default, it's 0, i.e., reading from the beginning. + * + * @param position the position of the file + * @return The builder for method chaining. + */ + Builder position(Long position); + + /** + * Sets the number of bytes to read from this file. + * + *

    By default, it's same as the file length. + * + * @param numBytesToRead number of bytes to read + * @return The builder for method chaining. + */ + Builder numBytesToRead(Long numBytesToRead); } private static final class DefaultBuilder implements Builder { + private Long position; private Path path; private Integer chunkSizeInBytes; + private Long numBytesToRead; @Override public Builder path(Path path) { @@ -162,6 +217,18 @@ public Builder chunkSizeInBytes(Integer chunkSizeInBytes) { return this; } + @Override + public Builder position(Long position) { + this.position = position; + return this; + } + + @Override + public Builder numBytesToRead(Long numBytesToRead) { + this.numBytesToRead = numBytesToRead; + return this; + } + public void setChunkSizeInBytes(Integer chunkSizeInBytes) { chunkSizeInBytes(chunkSizeInBytes); } @@ -175,14 +242,12 @@ public FileAsyncRequestBody build() { /** * Reads the file for one subscriber. */ - private static final class FileSubscription implements Subscription { - private final Path path; + private final class FileSubscription implements Subscription { private final AsynchronousFileChannel inputChannel; private final Subscriber subscriber; - private final int chunkSize; - private final AtomicLong position = new AtomicLong(0); - private final AtomicLong remainingBytes = new AtomicLong(0); + private final AtomicLong currentPosition; + private final AtomicLong remainingBytes; private final long sizeAtStart; private final FileTime modifiedTimeAtStart; private long outstandingDemand = 0; @@ -190,17 +255,14 @@ private static final class FileSubscription implements Subscription { private volatile boolean done = false; private final Object lock = new Object(); - private FileSubscription(Path path, - AsynchronousFileChannel inputChannel, - Subscriber subscriber, - int chunkSize) throws IOException { - this.path = path; + private FileSubscription(AsynchronousFileChannel inputChannel, + Subscriber subscriber) throws IOException { 
this.inputChannel = inputChannel; this.subscriber = subscriber; - this.chunkSize = chunkSize; this.sizeAtStart = inputChannel.size(); this.modifiedTimeAtStart = Files.getLastModifiedTime(path); - this.remainingBytes.set(Validate.isNotNegative(sizeAtStart, "size")); + this.remainingBytes = new AtomicLong(numBytesToRead); + this.currentPosition = new AtomicLong(position); } @Override @@ -255,8 +317,8 @@ private void readData() { return; } - ByteBuffer buffer = ByteBuffer.allocate(chunkSize); - inputChannel.read(buffer, position.get(), buffer, new CompletionHandler() { + ByteBuffer buffer = ByteBuffer.allocate(Math.min(chunkSizeInBytes, NumericUtils.saturatedCast(remainingBytes.get()))); + inputChannel.read(buffer, currentPosition.get(), buffer, new CompletionHandler() { @Override public void completed(Integer result, ByteBuffer attachment) { try { @@ -264,7 +326,7 @@ public void completed(Integer result, ByteBuffer attachment) { attachment.flip(); int readBytes = attachment.remaining(); - position.addAndGet(readBytes); + currentPosition.addAndGet(readBytes); remainingBytes.addAndGet(-readBytes); signalOnNext(attachment); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelper.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelper.java new file mode 100644 index 00000000000..4b0acfbd81f --- /dev/null +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelper.java @@ -0,0 +1,185 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.async; + +import java.nio.ByteBuffer; +import java.nio.file.Path; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkTestInternalApi; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.async.SimplePublisher; + +/** + * A helper class to split a {@link FileAsyncRequestBody} to multiple smaller async request bodies. It ensures the buffer used to + * be under the configured size via {@link AsyncRequestBodySplitConfiguration#bufferSizeInBytes()} by tracking the number of + * concurrent ongoing {@link AsyncRequestBody}s. 
+ */ +@SdkInternalApi +public final class FileAsyncRequestBodySplitHelper { + private static final Logger log = Logger.loggerFor(FileAsyncRequestBodySplitHelper.class); + + private final AtomicBoolean isSendingRequestBody = new AtomicBoolean(false); + private final AtomicLong remainingBytes; + + private final long totalContentLength; + private final Path path; + private final int bufferPerAsyncRequestBody; + private final long totalBufferSize; + private final long chunkSize; + + private volatile boolean isDone = false; + + private AtomicInteger numAsyncRequestBodiesInFlight = new AtomicInteger(0); + private AtomicInteger chunkIndex = new AtomicInteger(0); + + public FileAsyncRequestBodySplitHelper(FileAsyncRequestBody asyncRequestBody, + AsyncRequestBodySplitConfiguration splitConfiguration) { + Validate.notNull(asyncRequestBody, "asyncRequestBody"); + Validate.notNull(splitConfiguration, "splitConfiguration"); + Validate.isTrue(asyncRequestBody.contentLength().isPresent(), "Content length must be present", asyncRequestBody); + this.totalContentLength = asyncRequestBody.contentLength().get(); + this.remainingBytes = new AtomicLong(totalContentLength); + this.path = asyncRequestBody.path(); + this.chunkSize = splitConfiguration.chunkSizeInBytes() == null ? + AsyncRequestBodySplitConfiguration.defaultConfiguration().chunkSizeInBytes() : + splitConfiguration.chunkSizeInBytes(); + this.totalBufferSize = splitConfiguration.bufferSizeInBytes() == null ? 
+ AsyncRequestBodySplitConfiguration.defaultConfiguration().bufferSizeInBytes() : + splitConfiguration.bufferSizeInBytes(); + this.bufferPerAsyncRequestBody = asyncRequestBody.chunkSizeInBytes(); + } + + public SdkPublisher split() { + + SimplePublisher simplePublisher = new SimplePublisher<>(); + + try { + sendAsyncRequestBody(simplePublisher); + } catch (Throwable throwable) { + simplePublisher.error(throwable); + } + + return SdkPublisher.adapt(simplePublisher); + } + + private void sendAsyncRequestBody(SimplePublisher simplePublisher) { + do { + if (!isSendingRequestBody.compareAndSet(false, true)) { + return; + } + + try { + doSendAsyncRequestBody(simplePublisher); + } finally { + isSendingRequestBody.set(false); + } + } while (shouldSendMore()); + } + + private void doSendAsyncRequestBody(SimplePublisher simplePublisher) { + while (shouldSendMore()) { + AsyncRequestBody currentAsyncRequestBody = newFileAsyncRequestBody(simplePublisher); + simplePublisher.send(currentAsyncRequestBody); + numAsyncRequestBodiesInFlight.incrementAndGet(); + checkCompletion(simplePublisher, currentAsyncRequestBody); + } + } + + private void checkCompletion(SimplePublisher simplePublisher, AsyncRequestBody currentAsyncRequestBody) { + long remaining = remainingBytes.addAndGet(-currentAsyncRequestBody.contentLength().get()); + + if (remaining == 0) { + isDone = true; + simplePublisher.complete(); + } else if (remaining < 0) { + isDone = true; + simplePublisher.error(SdkClientException.create( + "Unexpected error occurred. 
Remaining data is negative: " + remaining)); + } + } + + private void startNextRequestBody(SimplePublisher simplePublisher) { + numAsyncRequestBodiesInFlight.decrementAndGet(); + sendAsyncRequestBody(simplePublisher); + } + + private AsyncRequestBody newFileAsyncRequestBody(SimplePublisher simplePublisher) { + long position = chunkSize * chunkIndex.getAndIncrement(); + long numBytesToReadForThisChunk = Math.min(totalContentLength - position, chunkSize); + FileAsyncRequestBody fileAsyncRequestBody = FileAsyncRequestBody.builder() + .path(path) + .position(position) + .numBytesToRead(numBytesToReadForThisChunk) + .build(); + return new FileAsyncRequestBodyWrapper(fileAsyncRequestBody, simplePublisher); + } + + /** + * Should not send more if it's done OR sending next request body would exceed the total buffer size + */ + private boolean shouldSendMore() { + if (isDone) { + return false; + } + + long currentUsedBuffer = (long) numAsyncRequestBodiesInFlight.get() * bufferPerAsyncRequestBody; + return currentUsedBuffer + bufferPerAsyncRequestBody <= totalBufferSize; + } + + @SdkTestInternalApi + AtomicInteger numAsyncRequestBodiesInFlight() { + return numAsyncRequestBodiesInFlight; + } + + private final class FileAsyncRequestBodyWrapper implements AsyncRequestBody { + + private final FileAsyncRequestBody fileAsyncRequestBody; + private final SimplePublisher simplePublisher; + + FileAsyncRequestBodyWrapper(FileAsyncRequestBody fileAsyncRequestBody, + SimplePublisher simplePublisher) { + this.fileAsyncRequestBody = fileAsyncRequestBody; + this.simplePublisher = simplePublisher; + } + + @Override + public void subscribe(Subscriber s) { + fileAsyncRequestBody.doAfterOnComplete(() -> startNextRequestBody(simplePublisher)) + // The reason we still need to call startNextRequestBody when the subscription is + // cancelled is that upstream could cancel the subscription even though the stream has + // finished successfully before onComplete. 
If this happens, doAfterOnComplete callback + // will never be invoked, and if the current buffer is full, the publisher will stop + // sending new FileAsyncRequestBody, leading to uncompleted future. + .doAfterOnCancel(() -> startNextRequestBody(simplePublisher)) + .subscribe(s); + } + + @Override + public Optional contentLength() { + return fileAsyncRequestBody.contentLength(); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java index c56d1b6437d..6d8d18a1475 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/SplittingPublisher.java @@ -24,6 +24,7 @@ import org.reactivestreams.Subscription; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.core.exception.NonRetryableException; import software.amazon.awssdk.core.internal.util.NoopSubscription; @@ -41,18 +42,24 @@ @SdkInternalApi public class SplittingPublisher implements SdkPublisher { private static final Logger log = Logger.loggerFor(SplittingPublisher.class); - private static final long DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024L; - private static final long DEFAULT_BUFFER_SIZE = DEFAULT_CHUNK_SIZE * 4; private final AsyncRequestBody upstreamPublisher; private final SplittingSubscriber splittingSubscriber; private final SimplePublisher downstreamPublisher = new SimplePublisher<>(); private final long chunkSizeInBytes; private final long bufferSizeInBytes; - private SplittingPublisher(Builder builder) { - this.upstreamPublisher = Validate.paramNotNull(builder.asyncRequestBody, "asyncRequestBody"); - 
this.chunkSizeInBytes = builder.chunkSizeInBytes == null ? DEFAULT_CHUNK_SIZE : builder.chunkSizeInBytes; - this.bufferSizeInBytes = builder.bufferSizeInBytes == null ? DEFAULT_BUFFER_SIZE : builder.bufferSizeInBytes; + public SplittingPublisher(AsyncRequestBody asyncRequestBody, + AsyncRequestBodySplitConfiguration splitConfiguration) { + this.upstreamPublisher = Validate.paramNotNull(asyncRequestBody, "asyncRequestBody"); + Validate.notNull(splitConfiguration, "splitConfiguration"); + this.chunkSizeInBytes = splitConfiguration.chunkSizeInBytes() == null ? + AsyncRequestBodySplitConfiguration.defaultConfiguration().chunkSizeInBytes() : + splitConfiguration.chunkSizeInBytes(); + + this.bufferSizeInBytes = splitConfiguration.bufferSizeInBytes() == null ? + AsyncRequestBodySplitConfiguration.defaultConfiguration().bufferSizeInBytes() : + splitConfiguration.bufferSizeInBytes(); + this.splittingSubscriber = new SplittingSubscriber(upstreamPublisher.contentLength().orElse(null)); if (!upstreamPublisher.contentLength().isPresent()) { @@ -62,10 +69,6 @@ private SplittingPublisher(Builder builder) { } } - public static Builder builder() { - return new Builder(); - } - @Override public void subscribe(Subscriber downstreamSubscriber) { downstreamPublisher.subscribe(downstreamSubscriber); @@ -303,29 +306,4 @@ private void addDataBuffered(int length) { } } } - - public static final class Builder { - private AsyncRequestBody asyncRequestBody; - private Long chunkSizeInBytes; - private Long bufferSizeInBytes; - - public Builder asyncRequestBody(AsyncRequestBody asyncRequestBody) { - this.asyncRequestBody = asyncRequestBody; - return this; - } - - public Builder chunkSizeInBytes(Long chunkSizeInBytes) { - this.chunkSizeInBytes = chunkSizeInBytes; - return this; - } - - public Builder bufferSizeInBytes(Long bufferSizeInBytes) { - this.bufferSizeInBytes = bufferSizeInBytes; - return this; - } - - public SplittingPublisher build() { - return new SplittingPublisher(this); - } - } } 
diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java index a54fd967837..3b78dedaf2a 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApiCallMetricCollectionStage.java @@ -21,6 +21,7 @@ import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; import software.amazon.awssdk.core.internal.http.pipeline.RequestToResponsePipeline; +import software.amazon.awssdk.core.internal.util.MetricUtils; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.metrics.MetricCollector; @@ -39,6 +40,7 @@ public ApiCallMetricCollectionStage(RequestPipeline execute(SdkHttpFullRequest input, RequestExecutionContext context) throws Exception { MetricCollector metricCollector = context.executionContext().metricCollector(); + MetricUtils.collectServiceEndpointMetrics(metricCollector, input); // Note: at this point, any exception, even a service exception, will // be thrown from the wrapped pipeline so we can't use diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java index 3d57cedea52..09016026be1 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java +++ 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java @@ -20,6 +20,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.RequestPipeline; +import software.amazon.awssdk.core.internal.util.MetricUtils; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.metrics.MetricCollector; @@ -40,6 +41,7 @@ public AsyncApiCallMetricCollectionStage(RequestPipeline execute(SdkHttpFullRequest input, RequestExecutionContext context) throws Exception { MetricCollector metricCollector = context.executionContext().metricCollector(); + MetricUtils.collectServiceEndpointMetrics(metricCollector, input); CompletableFuture future = new CompletableFuture<>(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/HttpChecksumStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/HttpChecksumStage.java index 84dcd981b22..aaf1c27428d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/HttpChecksumStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/HttpChecksumStage.java @@ -20,8 +20,8 @@ import static software.amazon.awssdk.core.HttpChecksumConstant.DEFAULT_ASYNC_CHUNK_SIZE; import static software.amazon.awssdk.core.HttpChecksumConstant.SIGNING_METHOD; import static software.amazon.awssdk.core.internal.io.AwsChunkedEncodingInputStream.DEFAULT_CHUNK_SIZE; -import static software.amazon.awssdk.core.internal.io.AwsUnsignedChunkedEncodingInputStream.calculateStreamContentLength; -import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateChecksumContentLength; +import static 
software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateChecksumTrailerLength; +import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateStreamContentLength; import static software.amazon.awssdk.core.internal.util.HttpChecksumResolver.getResolvedChecksumSpecs; import static software.amazon.awssdk.http.Header.CONTENT_LENGTH; @@ -179,7 +179,7 @@ private void addFlexibleChecksumInTrailer(SdkHttpFullRequest.Builder request, Re } } - long checksumContentLength = calculateChecksumContentLength(checksumSpecs.algorithm(), checksumSpecs.headerName()); + long checksumContentLength = calculateChecksumTrailerLength(checksumSpecs.algorithm(), checksumSpecs.headerName()); long contentLen = checksumContentLength + calculateStreamContentLength(originalContentLength, chunkSize); request.putHeader(HttpChecksumConstant.HEADER_FOR_TRAILER_REFERENCE, checksumSpecs.headerName()) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsUnsignedChunkedEncodingInputStream.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsUnsignedChunkedEncodingInputStream.java index 186ca5d7d0d..4c7f46a248c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsUnsignedChunkedEncodingInputStream.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/io/AwsUnsignedChunkedEncodingInputStream.java @@ -18,7 +18,6 @@ import java.io.InputStream; import java.nio.charset.StandardCharsets; import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.core.checksums.Algorithm; import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.internal.chunked.AwsChunkedEncodingConfig; @@ -40,48 +39,6 @@ public static Builder builder() { return new Builder(); } - /** - * Calculates the content length for a given Algorithm and header name. 
- * - * @param algorithm Algorithm used. - * @param headerName Header name. - * @return Content length of the trailer that will be appended at the end. - */ - public static long calculateChecksumContentLength(Algorithm algorithm, String headerName) { - return headerName.length() - + HEADER_COLON_SEPARATOR.length() - + algorithm.base64EncodedLength().longValue() - + CRLF.length() + CRLF.length(); - } - - /** - * - * @param originalContentLength Original Content length. - * @return Calculatec Chunk Length with the chunk encoding format. - */ - private static long calculateChunkLength(long originalContentLength) { - return Long.toHexString(originalContentLength).length() - + CRLF.length() - + originalContentLength - + CRLF.length(); - } - - public static long calculateStreamContentLength(long originalLength, long defaultChunkSize) { - if (originalLength < 0 || defaultChunkSize == 0) { - throw new IllegalArgumentException(originalLength + ", " + defaultChunkSize + "Args <= 0 not expected"); - } - - long maxSizeChunks = originalLength / defaultChunkSize; - long remainingBytes = originalLength % defaultChunkSize; - - long allChunks = maxSizeChunks * calculateChunkLength(defaultChunkSize); - long remainingInChunk = remainingBytes > 0 ? 
calculateChunkLength(remainingBytes) : 0; - // last byte is composed of a "0" and "\r\n" - long lastByteSize = 1 + (long) CRLF.length(); - - return allChunks + remainingInChunk + lastByteSize; - } - @Override protected byte[] createFinalChunk(byte[] finalChunk) { StringBuilder chunkHeader = new StringBuilder(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ChunkContentUtils.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ChunkContentUtils.java index 54ad5678159..91d47c31449 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ChunkContentUtils.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/ChunkContentUtils.java @@ -28,35 +28,64 @@ public final class ChunkContentUtils { public static final String ZERO_BYTE = "0"; public static final String CRLF = "\r\n"; + public static final String LAST_CHUNK = ZERO_BYTE + CRLF; + public static final long LAST_CHUNK_LEN = LAST_CHUNK.length(); + private ChunkContentUtils() { } /** + * The chunk format is: chunk-size CRLF chunk-data CRLF. + * * @param originalContentLength Original Content length. - * @return Calculates Chunk Length. + * @return the length of this chunk */ public static long calculateChunkLength(long originalContentLength) { + if (originalContentLength == 0) { + return 0; + } return Long.toHexString(originalContentLength).length() - + CRLF.length() - + originalContentLength - + CRLF.length() - + ZERO_BYTE.length() + CRLF.length(); + + CRLF.length() + + originalContentLength + + CRLF.length(); + } + + /** + * Calculates the content length for data that is divided into chunks. + * + * @param originalLength original content length. + * @param chunkSize chunk size + * @return total content length of the chunk-encoded stream, including the terminating zero-length chunk. 
+ */ + public static long calculateStreamContentLength(long originalLength, long chunkSize) { + if (originalLength < 0 || chunkSize == 0) { + throw new IllegalArgumentException(originalLength + ", " + chunkSize + "Args <= 0 not expected"); + } + + long maxSizeChunks = originalLength / chunkSize; + long remainingBytes = originalLength % chunkSize; + + long allChunks = maxSizeChunks * calculateChunkLength(chunkSize); + long remainingInChunk = remainingBytes > 0 ? calculateChunkLength(remainingBytes) : 0; + // last byte is composed of a "0" and "\r\n" + long lastByteSize = 1 + (long) CRLF.length(); + + return allChunks + remainingInChunk + lastByteSize; } /** - * Calculates the content length for a given Algorithm and header name. + * Calculates the content length for a given algorithm and header name. * * @param algorithm Algorithm used. * @param headerName Header name. * @return Content length of the trailer that will be appended at the end. */ - public static long calculateChecksumContentLength(Algorithm algorithm, String headerName) { - int checksumLength = algorithm.base64EncodedLength(); - - return (headerName.length() - + HEADER_COLON_SEPARATOR.length() - + checksumLength - + CRLF.length() + CRLF.length()); + public static long calculateChecksumTrailerLength(Algorithm algorithm, String headerName) { + return headerName.length() + + HEADER_COLON_SEPARATOR.length() + + algorithm.base64EncodedLength().longValue() + + CRLF.length() + + CRLF.length(); } /** @@ -86,17 +115,13 @@ public static ByteBuffer createChunk(ByteBuffer chunkData, boolean isLastByte) { chunkHeader.append(CRLF); try { byte[] header = chunkHeader.toString().getBytes(StandardCharsets.UTF_8); - // Last byte does not need additional \r\n trailer byte[] trailer = !isLastByte ? 
CRLF.getBytes(StandardCharsets.UTF_8) : "".getBytes(StandardCharsets.UTF_8); ByteBuffer chunkFormattedBuffer = ByteBuffer.allocate(header.length + chunkLength + trailer.length); - chunkFormattedBuffer.put(header) - .put(chunkData) - .put(trailer); + chunkFormattedBuffer.put(header).put(chunkData).put(trailer); chunkFormattedBuffer.flip(); return chunkFormattedBuffer; } catch (Exception e) { - // This is to warp BufferOverflowException,ReadOnlyBufferException to SdkClientException. throw SdkClientException.builder() .message("Unable to create chunked data. " + e.getMessage()) .cause(e) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java index 8805653dd63..bee59eae51f 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java @@ -18,13 +18,17 @@ import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZN_REQUEST_ID_HEADERS; import static software.amazon.awssdk.core.http.HttpResponseHandler.X_AMZ_ID_2_HEADER; +import java.net.URI; +import java.net.URISyntaxException; import java.time.Duration; import java.util.concurrent.Callable; import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.http.HttpMetric; +import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.SdkHttpFullResponse; import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.metrics.NoOpMetricCollector; @@ -65,6 +69,23 @@ public static Pair measureDurationUnsafe(Callable c) throws return Pair.of(result, d); } 
+ /** + * Collect the SERVICE_ENDPOINT metric for this request. + */ + public static void collectServiceEndpointMetrics(MetricCollector metricCollector, SdkHttpFullRequest httpRequest) { + if (metricCollector != null && !(metricCollector instanceof NoOpMetricCollector) && httpRequest != null) { + // Only interested in the service endpoint so don't include any path, query, or fragment component + URI requestUri = httpRequest.getUri(); + try { + URI serviceEndpoint = new URI(requestUri.getScheme(), requestUri.getAuthority(), null, null, null); + metricCollector.reportMetric(CoreMetric.SERVICE_ENDPOINT, serviceEndpoint); + } catch (URISyntaxException e) { + // This should not happen since getUri() should return a valid URI + throw SdkClientException.create("Unable to collect SERVICE_ENDPOINT metric", e); + } + } + } + public static void collectHttpMetrics(MetricCollector metricCollector, SdkHttpFullResponse httpResponse) { if (metricCollector != null && !(metricCollector instanceof NoOpMetricCollector) && httpResponse != null) { metricCollector.reportMetric(HttpMetric.HTTP_STATUS_CODE, httpResponse.statusCode()); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java index f4529d32c1a..df71deacc27 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/metrics/CoreMetric.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.core.metrics; +import java.net.URI; import java.time.Duration; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.retry.RetryPolicy; @@ -50,6 +51,12 @@ public final class CoreMetric { public static final SdkMetric RETRY_COUNT = metric("RetryCount", Integer.class, MetricLevel.ERROR); + /** + * The endpoint for the service. 
+ */ + public static final SdkMetric SERVICE_ENDPOINT = + metric("ServiceEndpoint", URI.class, MetricLevel.ERROR); + /** * The duration of the API call. This includes all call attempts made. * diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryOnExceptionsCondition.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryOnExceptionsCondition.java index ab143731c93..5589f67ffba 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryOnExceptionsCondition.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/retry/conditions/RetryOnExceptionsCondition.java @@ -42,8 +42,8 @@ private RetryOnExceptionsCondition(Set> exceptionsToR /** * @param context Context about the state of the last request and information about the number of requests made. - * @return True if the exception class matches one of the whitelisted exceptions or if the cause of the exception matches the - * whitelisted exception. + * @return True if the exception class or the cause of the exception matches one of the exceptions supplied at + * initialization time. 
*/ @Override public boolean shouldRetry(RetryPolicyContext context) { @@ -56,10 +56,10 @@ public boolean shouldRetry(RetryPolicyContext context) { Predicate> isRetryableException = ex -> ex.isAssignableFrom(exception.getClass()); - Predicate> hasRetrableCause = + Predicate> hasRetryableCause = ex -> exception.getCause() != null && ex.isAssignableFrom(exception.getCause().getClass()); - return exceptionsToRetryOn.stream().anyMatch(isRetryableException.or(hasRetrableCause)); + return exceptionsToRetryOn.stream().anyMatch(isRetryableException.or(hasRetryableCause)); } /** diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/SdkUserAgent.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/SdkUserAgent.java index f87a70dd550..bbaa7dd2253 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/SdkUserAgent.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/util/SdkUserAgent.java @@ -38,7 +38,7 @@ public final class SdkUserAgent { + "vendor/{java.vendor}"; /** Disallowed characters in the user agent token: @see RFC 7230 */ - private static final String UA_BLACKLIST_REGEX = "[() ,/:;<=>?@\\[\\]{}\\\\]"; + private static final String UA_DENYLIST_REGEX = "[() ,/:;<=>?@\\[\\]{}\\\\]"; /** Shared logger for any issues while loading version information. */ private static final Logger log = LoggerFactory.getLogger(SdkUserAgent.class); @@ -125,7 +125,7 @@ String getUserAgent() { * @return the input with spaces replaced by underscores */ private static String sanitizeInput(String input) { - return input == null ? UNKNOWN : input.replaceAll(UA_BLACKLIST_REGEX, "_"); + return input == null ? 
UNKNOWN : input.replaceAll(UA_DENYLIST_REGEX, "_"); } private static String getAdditionalJvmLanguages() { diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/FileRequestBodyConfigurationTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/FileRequestBodyConfigurationTest.java new file mode 100644 index 00000000000..535a7176856 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/FileRequestBodyConfigurationTest.java @@ -0,0 +1,73 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.nio.file.Paths; +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class FileRequestBodyConfigurationTest { + + @Test + void equalsHashCode() { + EqualsVerifier.forClass(FileRequestBodyConfiguration.class) + .verify(); + } + + @Test + void invalidRequest_shouldThrowException() { + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .path(Paths.get(".")) + .position(-1L) + .build()) + .hasMessage("position must not be negative"); + + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .path(Paths.get(".")) + .numBytesToRead(-1L) + .build()) + .hasMessage("numBytesToRead must not be negative"); + + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .path(Paths.get(".")) + .chunkSizeInBytes(0) + .build()) + .hasMessage("chunkSizeInBytes must be positive"); + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .path(Paths.get(".")) + .chunkSizeInBytes(-5) + .build()) + .hasMessage("chunkSizeInBytes must be positive"); + assertThatThrownBy(() -> FileRequestBodyConfiguration.builder() + .build()) + .hasMessage("path"); + } + + @Test + void toBuilder_shouldCopyAllProperties() { + FileRequestBodyConfiguration config = FileRequestBodyConfiguration.builder() + .path(Paths.get(".")).numBytesToRead(100L) + .position(1L) + .chunkSizeInBytes(1024) + .build(); + + assertThat(config.toBuilder().build()).isEqualTo(config); + } + +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/checksum/AwsChunkedEncodingInputStreamTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/checksum/AwsChunkedEncodingInputStreamTest.java index 0fa862dd2ac..44ac097d16c 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/checksum/AwsChunkedEncodingInputStreamTest.java +++ 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/checksum/AwsChunkedEncodingInputStreamTest.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.core.checksum; import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.core.internal.util.ChunkContentUtils.calculateChecksumTrailerLength; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -25,6 +26,7 @@ import software.amazon.awssdk.core.checksums.SdkChecksum; import software.amazon.awssdk.core.internal.io.AwsChunkedEncodingInputStream; import software.amazon.awssdk.core.internal.io.AwsUnsignedChunkedEncodingInputStream; +import software.amazon.awssdk.core.internal.util.ChunkContentUtils; public class AwsChunkedEncodingInputStreamTest { @@ -55,10 +57,9 @@ public void readAwsUnsignedChunkedEncodingInputStream() throws IOException { public void lengthsOfCalculateByChecksumCalculatingInputStream(){ String initialString = "Hello world"; - long calculateChunkLength = AwsUnsignedChunkedEncodingInputStream.calculateStreamContentLength(initialString.length(), - AwsChunkedEncodingInputStream.DEFAULT_CHUNK_SIZE); - long checksumContentLength = AwsUnsignedChunkedEncodingInputStream.calculateChecksumContentLength( - SHA256_ALGORITHM, SHA256_HEADER_NAME); + long calculateChunkLength = ChunkContentUtils.calculateStreamContentLength(initialString.length(), + AwsChunkedEncodingInputStream.DEFAULT_CHUNK_SIZE); + long checksumContentLength = calculateChecksumTrailerLength(SHA256_ALGORITHM, SHA256_HEADER_NAME); assertThat(calculateChunkLength).isEqualTo(19); assertThat(checksumContentLength).isEqualTo(71); } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java index 39abaffd8f7..4aaeaa3c071 100644 --- 
a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/ChecksumCalculatingAsyncRequestBodyTest.java @@ -28,11 +28,13 @@ import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; import io.reactivex.Flowable; +import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; import org.assertj.core.util.Lists; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; import software.amazon.awssdk.core.async.AsyncRequestBody; @@ -44,78 +46,91 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; -@RunWith(Parameterized.class) public class ChecksumCalculatingAsyncRequestBodyTest { - private final static String testString = "Hello world"; - private final static String expectedTestString = "b\r\n" + + private static final String testString = "Hello world"; + private static final String expectedTestString = "b\r\n" + testString + "\r\n" + "0\r\n" + "x-amz-checksum-crc32:i9aeUg==\r\n\r\n"; - private final static Path path; - - private final static ByteBuffer positionNonZeroBytebuffer; - - private final static ByteBuffer positionZeroBytebuffer; + private static final String emptyString = ""; + private static final String expectedEmptyString = "0\r\n" + + "x-amz-checksum-crc32:AAAAAA==\r\n\r\n"; + private static final Path path; + private static final Path pathToEmpty; static { - byte[] content = testString.getBytes(); - byte[] randomContent = 
RandomStringUtils.randomAscii(1024).getBytes(StandardCharsets.UTF_8); - positionNonZeroBytebuffer = ByteBuffer.allocate(content.length + randomContent.length); - positionNonZeroBytebuffer.put(randomContent) - .put(content); - positionNonZeroBytebuffer.position(randomContent.length); - - positionZeroBytebuffer = ByteBuffer.allocate(content.length); - positionZeroBytebuffer.put(content); - positionZeroBytebuffer.flip(); - FileSystem fs = Jimfs.newFileSystem(Configuration.unix()); path = fs.getPath("./test"); + pathToEmpty = fs.getPath("./testEmpty"); try { - Files.write(path, content); + Files.write(path, testString.getBytes()); + Files.write(pathToEmpty, emptyString.getBytes()); + } catch (IOException e) { e.printStackTrace(); } } - private final AsyncRequestBody provider; - - public ChecksumCalculatingAsyncRequestBodyTest(AsyncRequestBody provider) { - this.provider = provider; + private static Stream publishers() { + return Stream.of( + Arguments.of("RequestBody from string, test string", + checksumPublisher(AsyncRequestBody.fromString(testString)), + expectedTestString), + Arguments.of("RequestBody from file, test string", + checksumPublisher(AsyncRequestBody.fromFile(path)), + expectedTestString), + Arguments.of("RequestBody from buffer, 0 pos, test string", + checksumPublisher(AsyncRequestBody.fromRemainingByteBuffer(posZeroByteBuffer(testString))), + expectedTestString), + Arguments.of("RequestBody from buffer, random pos, test string", + checksumPublisher(AsyncRequestBody.fromRemainingByteBufferUnsafe(nonPosZeroByteBuffer(testString))), + expectedTestString), + Arguments.of("RequestBody from string, empty string", + checksumPublisher(AsyncRequestBody.fromString(emptyString)), + expectedEmptyString), + //Note: FileAsyncRequestBody with empty file does not call onNext, only onComplete() + Arguments.of("RequestBody from file, empty string", + checksumPublisher(AsyncRequestBody.fromFile(pathToEmpty)), + expectedEmptyString), + Arguments.of("RequestBody from 
buffer, 0 pos, empty string", + checksumPublisher(AsyncRequestBody.fromRemainingByteBuffer(posZeroByteBuffer(emptyString))), + expectedEmptyString), + Arguments.of("RequestBody from string, random pos, empty string", + checksumPublisher(AsyncRequestBody.fromRemainingByteBufferUnsafe(nonPosZeroByteBuffer(emptyString))), + expectedEmptyString)); } - @Parameterized.Parameters - public static AsyncRequestBody[] data() { - AsyncRequestBody[] asyncRequestBodies = { - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromString(testString)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), - - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromFile(path)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), + private static ChecksumCalculatingAsyncRequestBody checksumPublisher(AsyncRequestBody sourcePublisher) { + return ChecksumCalculatingAsyncRequestBody.builder() + .asyncRequestBody(sourcePublisher) + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amz-checksum-crc32").build(); + } - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromRemainingByteBuffer(positionZeroBytebuffer)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), - ChecksumCalculatingAsyncRequestBody.builder() - .asyncRequestBody(AsyncRequestBody.fromRemainingByteBuffersUnsafe(positionNonZeroBytebuffer)) - .algorithm(Algorithm.CRC32) - .trailerHeader("x-amz-checksum-crc32").build(), - }; - return asyncRequestBodies; + private static ByteBuffer posZeroByteBuffer(String content) { + byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + ByteBuffer bytes = ByteBuffer.allocate(contentBytes.length); + bytes.put(contentBytes); + bytes.flip(); + return bytes; } - @Test - public void hasCorrectLength() { - assertThat(provider.contentLength()).hasValue((long) expectedTestString.length()); + private static 
ByteBuffer nonPosZeroByteBuffer(String content) { + byte[] randomContent = RandomStringUtils.randomAscii(1024).getBytes(StandardCharsets.UTF_8); + byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); + + ByteBuffer bytes = ByteBuffer.allocate(contentBytes.length + randomContent.length); + bytes.put(randomContent) + .put(contentBytes); + bytes.position(randomContent.length); + return bytes; } - @Test - public void hasCorrectContent() throws InterruptedException { + @ParameterizedTest(name = "{index} {0}") + @MethodSource("publishers") + public void publish_differentAsyncRequestBodiesAndSources_produceCorrectData(String description, + AsyncRequestBody provider, + String expectedContent) throws InterruptedException { StringBuilder sb = new StringBuilder(); CountDownLatch done = new CountDownLatch(1); @@ -136,14 +151,15 @@ public void onComplete() { done.countDown(); } }; - provider.subscribe(subscriber); done.await(10, TimeUnit.SECONDS); - assertThat(sb).hasToString(expectedTestString); + + assertThat(provider.contentLength()).hasValue((long) expectedContent.length()); + assertThat(sb).hasToString(expectedContent); } @Test - public void stringConstructorHasCorrectContentType() { + public void constructor_asyncRequestBodyFromString_hasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() .asyncRequestBody(AsyncRequestBody.fromString("Hello world")) .algorithm(Algorithm.CRC32) @@ -153,7 +169,7 @@ public void stringConstructorHasCorrectContentType() { } @Test - public void fileConstructorHasCorrectContentType() { + public void constructor_asyncRequestBodyFromFile_hasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() .asyncRequestBody(AsyncRequestBody.fromFile(path)) .algorithm(Algorithm.CRC32) @@ -163,7 +179,7 @@ public void fileConstructorHasCorrectContentType() { } @Test - public void bytesArrayConstructorHasCorrectContentType() { + public void 
constructor_asyncRequestBodyFromBytes_hasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() .asyncRequestBody(AsyncRequestBody.fromBytes("hello world".getBytes())) .algorithm(Algorithm.CRC32) @@ -173,7 +189,7 @@ public void bytesArrayConstructorHasCorrectContentType() { } @Test - public void bytesBufferConstructorHasCorrectContentType() { + public void constructor_asyncRequestBodyFromByteBuffer_hasCorrectContentType() { ByteBuffer byteBuffer = ByteBuffer.wrap("hello world".getBytes()); AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() .asyncRequestBody(AsyncRequestBody.fromByteBuffer(byteBuffer)) @@ -184,7 +200,7 @@ public void bytesBufferConstructorHasCorrectContentType() { } @Test - public void emptyBytesConstructorHasCorrectContentType() { + public void constructor_asyncRequestBodyFromEmpty_hasCorrectContentType() { AsyncRequestBody requestBody = ChecksumCalculatingAsyncRequestBody.builder() .asyncRequestBody(AsyncRequestBody.empty()) .algorithm(Algorithm.CRC32) @@ -194,7 +210,7 @@ public void emptyBytesConstructorHasCorrectContentType() { } @Test - public void publisherConstructorThrowsExceptionIfNoContentLength() { + public void constructor_asyncRequestBodyFromPublisher_NoContentLength_throwsException() { List requestBodyStrings = Lists.newArrayList("A", "B", "C"); List bodyBytes = requestBodyStrings.stream() .map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))) @@ -210,22 +226,31 @@ public void publisherConstructorThrowsExceptionIfNoContentLength() { } @Test - public void fromBytes_NullChecks() { - - ChecksumCalculatingAsyncRequestBody.Builder noAlgorithmBuilder = ChecksumCalculatingAsyncRequestBody - .builder() - .asyncRequestBody( - AsyncRequestBody.fromString("Hello world")); + public void constructor_checksumIsNull_throwsException() { + assertThatExceptionOfType(NullPointerException.class).isThrownBy( + () -> ChecksumCalculatingAsyncRequestBody.builder() + 
.asyncRequestBody(AsyncRequestBody.fromString("Hello world")) + .trailerHeader("x-amzn-checksum-crc32") + .build()).withMessage("algorithm cannot be null"); + } - assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> noAlgorithmBuilder.build()); + @Test + public void constructor_asyncRequestBodyIsNull_throwsException() { + assertThatExceptionOfType(NullPointerException.class).isThrownBy( + () -> ChecksumCalculatingAsyncRequestBody.builder() + .algorithm(Algorithm.CRC32) + .trailerHeader("x-amzn-checksum-crc32") + .build()).withMessage("wrapped AsyncRequestBody cannot be null"); + } - ChecksumCalculatingAsyncRequestBody.Builder noAsyncReqBodyBuilder = ChecksumCalculatingAsyncRequestBody - .builder().algorithm(Algorithm.CRC32).trailerHeader("x-amzn-checksum-crc32"); - assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> noAsyncReqBodyBuilder.build()); + @Test + public void constructor_trailerHeaderIsNull_throwsException() { + assertThatExceptionOfType(NullPointerException.class).isThrownBy( + () -> ChecksumCalculatingAsyncRequestBody.builder() + .algorithm(Algorithm.CRC32) + .asyncRequestBody(AsyncRequestBody.fromString("Hello world")) + .build()).withMessage("trailerHeader cannot be null"); - ChecksumCalculatingAsyncRequestBody.Builder noTrailerHeaderBuilder = ChecksumCalculatingAsyncRequestBody - .builder().asyncRequestBody(AsyncRequestBody.fromString("Hello world")).algorithm(Algorithm.CRC32); - assertThatExceptionOfType(NullPointerException.class).isThrownBy(() -> noTrailerHeaderBuilder.build()); } @Test diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelperTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelperTest.java new file mode 100644 index 00000000000..4c5d0748d16 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodySplitHelperTest.java @@ -0,0 
+1,96 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.internal.async; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.core.internal.async.SplittingPublisherTestUtils.verifyIndividualAsyncRequestBody; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; +import software.amazon.awssdk.testutils.RandomTempFile; + +public class FileAsyncRequestBodySplitHelperTest { + + private static final int CHUNK_SIZE = 5; + private static Path testFile; + private static ScheduledExecutorService executor; + + + @BeforeAll + public static void setup() throws IOException { + testFile = new RandomTempFile(2000).toPath(); + executor = Executors.newScheduledThreadPool(1); + } + + @AfterAll + public static void teardown() throws IOException { + try { + Files.delete(testFile); + } catch 
(NoSuchFileException e) { + // ignore + } + executor.shutdown(); + } + + @ParameterizedTest + @ValueSource(ints = {CHUNK_SIZE, CHUNK_SIZE * 2 - 1, CHUNK_SIZE * 2}) + public void split_differentChunkSize_shouldSplitCorrectly(int chunkSize) throws Exception { + long bufferSize = 55l; + int chunkSizeInBytes = 10; + FileAsyncRequestBody fileAsyncRequestBody = FileAsyncRequestBody.builder() + .path(testFile) + .chunkSizeInBytes(10) + .build(); + AsyncRequestBodySplitConfiguration config = + AsyncRequestBodySplitConfiguration.builder() + .chunkSizeInBytes((long) chunkSize) + .bufferSizeInBytes(55L) + .build(); + FileAsyncRequestBodySplitHelper helper = new FileAsyncRequestBodySplitHelper(fileAsyncRequestBody, config); + + AtomicInteger maxConcurrency = new AtomicInteger(0); + ScheduledFuture scheduledFuture = executor.scheduleWithFixedDelay(verifyConcurrentRequests(helper, maxConcurrency), + 1, 50, TimeUnit.MICROSECONDS); + + verifyIndividualAsyncRequestBody(helper.split(), testFile, chunkSize); + scheduledFuture.cancel(true); + int expectedMaxConcurrency = (int) (bufferSize / chunkSizeInBytes); + assertThat(maxConcurrency.get()).isLessThanOrEqualTo(expectedMaxConcurrency); + } + + private static Runnable verifyConcurrentRequests(FileAsyncRequestBodySplitHelper helper, AtomicInteger maxConcurrency) { + return () -> { + int concurrency = helper.numAsyncRequestBodiesInFlight().get(); + + if (concurrency > maxConcurrency.get()) { + maxConcurrency.set(concurrency); + } + assertThat(helper.numAsyncRequestBodiesInFlight()).hasValueLessThan(10); + }; + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodyTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodyTest.java index da9daf557e2..5d12035c187 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodyTest.java +++ 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBodyTest.java @@ -15,11 +15,14 @@ package software.amazon.awssdk.core.internal.async; +import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.jupiter.api.Assertions.assertTrue; +import static software.amazon.awssdk.core.internal.async.SplittingPublisherTestUtils.verifyIndividualAsyncRequestBody; import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; import java.io.ByteArrayOutputStream; +import java.io.FileInputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; @@ -35,9 +38,12 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; import software.amazon.awssdk.testutils.RandomTempFile; import software.amazon.awssdk.utils.BinaryUtils; @@ -45,10 +51,12 @@ public class FileAsyncRequestBodyTest { private static final long MiB = 1024 * 1024; private static final long TEST_FILE_SIZE = 10 * MiB; private static Path testFile; + private static Path smallFile; @BeforeEach public void setup() throws IOException { testFile = new RandomTempFile(TEST_FILE_SIZE).toPath(); + smallFile = new RandomTempFile(100).toPath(); } @AfterEach @@ -226,6 +234,84 @@ public void changingFile_fileGetsDeleted_failsBecauseDeleted() throws Exception .hasCauseInstanceOf(IOException.class); } + @Test + public void positionNotZero_shouldReadFromPosition() throws Exception { + CompletableFuture future = new CompletableFuture<>(); + long position = 20L; + AsyncRequestBody asyncRequestBody = 
FileAsyncRequestBody.builder() + .path(smallFile) + .position(position) + .chunkSizeInBytes(10) + .build(); + + ByteArrayAsyncResponseTransformer.BaosSubscriber baosSubscriber = + new ByteArrayAsyncResponseTransformer.BaosSubscriber(future); + asyncRequestBody.subscribe(baosSubscriber); + assertThat(asyncRequestBody.contentLength()).contains(80L); + + byte[] bytes = future.get(1, TimeUnit.SECONDS); + + byte[] expected = new byte[80]; + try(FileInputStream inputStream = new FileInputStream(smallFile.toFile())) { + inputStream.skip(position); + inputStream.read(expected, 0, 80); + } + + assertThat(bytes).isEqualTo(expected); + } + + @Test + public void bothPositionAndNumBytesToReadConfigured_shouldHonor() throws Exception { + CompletableFuture future = new CompletableFuture<>(); + long position = 20L; + long numBytesToRead = 5L; + AsyncRequestBody asyncRequestBody = FileAsyncRequestBody.builder() + .path(smallFile) + .position(position) + .numBytesToRead(numBytesToRead) + .chunkSizeInBytes(10) + .build(); + + ByteArrayAsyncResponseTransformer.BaosSubscriber baosSubscriber = + new ByteArrayAsyncResponseTransformer.BaosSubscriber(future); + asyncRequestBody.subscribe(baosSubscriber); + assertThat(asyncRequestBody.contentLength()).contains(numBytesToRead); + + byte[] bytes = future.get(1, TimeUnit.SECONDS); + + byte[] expected = new byte[5]; + try (FileInputStream inputStream = new FileInputStream(smallFile.toFile())) { + inputStream.skip(position); + inputStream.read(expected, 0, 5); + } + + assertThat(bytes).isEqualTo(expected); + } + + @Test + public void numBytesToReadConfigured_shouldHonor() throws Exception { + CompletableFuture future = new CompletableFuture<>(); + AsyncRequestBody asyncRequestBody = FileAsyncRequestBody.builder() + .path(smallFile) + .numBytesToRead(5L) + .chunkSizeInBytes(10) + .build(); + + ByteArrayAsyncResponseTransformer.BaosSubscriber baosSubscriber = + new ByteArrayAsyncResponseTransformer.BaosSubscriber(future); + 
asyncRequestBody.subscribe(baosSubscriber); + assertThat(asyncRequestBody.contentLength()).contains(5L); + + byte[] bytes = future.get(1, TimeUnit.SECONDS); + + byte[] expected = new byte[5]; + try (FileInputStream inputStream = new FileInputStream(smallFile.toFile())) { + inputStream.read(expected, 0, 5); + } + + assertThat(bytes).isEqualTo(expected); + } + private static class ControllableSubscriber implements Subscriber { private final ByteArrayOutputStream output = new ByteArrayOutputStream(); private final CompletableFuture completed = new CompletableFuture<>(); diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java index 0966ea6eb76..d2e06f28492 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTest.java @@ -17,6 +17,7 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static software.amazon.awssdk.core.internal.async.SplittingPublisherTestUtils.verifyIndividualAsyncRequestBody; import static software.amazon.awssdk.utils.FunctionalUtils.invokeSafely; import java.io.ByteArrayInputStream; @@ -44,6 +45,7 @@ import org.reactivestreams.Subscriber; import org.reactivestreams.Subscription; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncRequestBodySplitConfiguration; import software.amazon.awssdk.utils.BinaryUtils; public class SplittingPublisherTest { @@ -72,11 +74,10 @@ public static void afterAll() throws Exception { public void split_contentUnknownMaxMemorySmallerThanChunkSize_shouldThrowException() { AsyncRequestBody body = AsyncRequestBody.fromPublisher(s -> { }); - assertThatThrownBy(() -> 
SplittingPublisher.builder() - .asyncRequestBody(body) - .chunkSizeInBytes(10L) - .bufferSizeInBytes(5L) - .build()) + assertThatThrownBy(() -> new SplittingPublisher(body, AsyncRequestBodySplitConfiguration.builder() + .chunkSizeInBytes(10L) + .bufferSizeInBytes(5L) + .build())) .hasMessageContaining("must be larger than or equal"); } @@ -106,11 +107,10 @@ public Optional contentLength() { return Optional.empty(); } }; - SplittingPublisher splittingPublisher = SplittingPublisher.builder() - .asyncRequestBody(asyncRequestBody) + SplittingPublisher splittingPublisher = new SplittingPublisher(asyncRequestBody, AsyncRequestBodySplitConfiguration.builder() .chunkSizeInBytes((long) CHUNK_SIZE) .bufferSizeInBytes(10L) - .build(); + .build()); List> futures = new ArrayList<>(); @@ -148,38 +148,13 @@ public Optional contentLength() { private static void verifySplitContent(AsyncRequestBody asyncRequestBody, int chunkSize) throws Exception { - SplittingPublisher splittingPublisher = SplittingPublisher.builder() - .asyncRequestBody(asyncRequestBody) - .chunkSizeInBytes((long) chunkSize) - .bufferSizeInBytes((long) chunkSize * 4) - .build(); + SplittingPublisher splittingPublisher = new SplittingPublisher(asyncRequestBody, + AsyncRequestBodySplitConfiguration.builder() + .chunkSizeInBytes((long) chunkSize) + .bufferSizeInBytes((long) chunkSize * 4) + .build()); - List> futures = new ArrayList<>(); - - splittingPublisher.subscribe(requestBody -> { - CompletableFuture baosFuture = new CompletableFuture<>(); - BaosSubscriber subscriber = new BaosSubscriber(baosFuture); - futures.add(baosFuture); - requestBody.subscribe(subscriber); - }).get(5, TimeUnit.SECONDS); - - assertThat(futures.size()).isEqualTo((int) Math.ceil(CONTENT_SIZE / (double) chunkSize)); - - for (int i = 0; i < futures.size(); i++) { - try (FileInputStream fileInputStream = new FileInputStream(testFile)) { - byte[] expected; - if (i == futures.size() - 1) { - int lastChunk = CONTENT_SIZE % chunkSize == 0 ? 
chunkSize : (CONTENT_SIZE % chunkSize); - expected = new byte[lastChunk]; - } else { - expected = new byte[chunkSize]; - } - fileInputStream.skip(i * chunkSize); - fileInputStream.read(expected); - byte[] actualBytes = futures.get(i).join(); - assertThat(actualBytes).isEqualTo(expected); - }; - } + verifyIndividualAsyncRequestBody(splittingPublisher, testFile.toPath(), chunkSize); } private static class TestAsyncRequestBody implements AsyncRequestBody { diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTestUtils.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTestUtils.java new file mode 100644 index 00000000000..04da97adbf4 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/async/SplittingPublisherTestUtils.java @@ -0,0 +1,70 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.internal.async; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; + +import java.io.File; +import java.io.FileInputStream; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import org.assertj.core.api.Assertions; +import org.reactivestreams.Publisher; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.SdkPublisher; +import software.amazon.awssdk.core.internal.async.ByteArrayAsyncResponseTransformer; +import software.amazon.awssdk.core.internal.async.SplittingPublisherTest; + +public final class SplittingPublisherTestUtils { + + public static void verifyIndividualAsyncRequestBody(SdkPublisher publisher, + Path file, + int chunkSize) throws Exception { + + List> futures = new ArrayList<>(); + publisher.subscribe(requestBody -> { + CompletableFuture baosFuture = new CompletableFuture<>(); + ByteArrayAsyncResponseTransformer.BaosSubscriber subscriber = + new ByteArrayAsyncResponseTransformer.BaosSubscriber(baosFuture); + requestBody.subscribe(subscriber); + futures.add(baosFuture); + }).get(5, TimeUnit.SECONDS); + + long contentLength = file.toFile().length(); + Assertions.assertThat(futures.size()).isEqualTo((int) Math.ceil(contentLength / (double) chunkSize)); + + for (int i = 0; i < futures.size(); i++) { + try (FileInputStream fileInputStream = new FileInputStream(file.toFile())) { + byte[] expected; + if (i == futures.size() - 1) { + int lastChunk = contentLength % chunkSize == 0 ? 
chunkSize : (int) (contentLength % chunkSize); + expected = new byte[lastChunk]; + } else { + expected = new byte[chunkSize]; + } + fileInputStream.skip(i * chunkSize); + fileInputStream.read(expected); + byte[] actualBytes = futures.get(i).join(); + Assertions.assertThat(actualBytes).isEqualTo(expected); + } + } + } +} diff --git a/docs/LaunchChangelog.md b/docs/LaunchChangelog.md index fc576a0d6f3..4748d67457d 100644 --- a/docs/LaunchChangelog.md +++ b/docs/LaunchChangelog.md @@ -827,3 +827,4 @@ The following libraries are available in 2.0: | Waiters | [Waiters](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/waiters.html) | 2.15.0 | | CloudFrontUrlSigner, CloudFrontCookieSigner | [CloudFrontUtilities](https://aws.amazon.com/blogs/developer/amazon-cloudfront-signed-urls-and-cookies-are-now-supported-in-aws-sdk-for-java-2-x/) | 2.18.33 | | TransferManager | [S3TransferManager](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/transfer-manager.html) | 2.19.0 | +| IAM Policy Builder | [IAM Policy Builder](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/feature-iam-policy-builder.html) | 2.20.126 diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index 57efd1b26ea..364afbc8a2c 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 4035dce765c..f998d9ea3d7 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT apache-client diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index 6cfc5639362..491b8171ede 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ 
-21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index bf36a80542d..3f7577e37ce 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/http-clients/pom.xml b/http-clients/pom.xml index 0bb3c35ddaa..0867a1706cb 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index eef4dcc4925..51b6dc56c58 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index 6618f5733e7..0381ef824f3 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml index 03561273bd0..47958a8f8d9 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index ae803de3621..d9809eea43f 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT pom AWS Java SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -91,7 +91,7 @@ 
${project.version} - 2.20.125 + 2.20.137 2.13.2 2.13.4.2 2.13.2 @@ -135,7 +135,7 @@ 9.4.45.v20220203 - 3.0.0-M5 + 3.1.2 3.8.1 3.1.2 3.0.0-M5 @@ -145,7 +145,7 @@ 3.1.1 1.6 8.42 - 0.8.7 + 0.8.10 1.6.8 1.6.0 2.8.2 @@ -169,7 +169,7 @@ 4.4.13 - 1.0.3 + 1.0.4 ${skipTests} ${project.basedir}/src/it/java @@ -256,7 +256,6 @@ maven-surefire-plugin ${maven.surefire.version} - ${argLine} **/*StabilityTest.java **/*StabilityTests.java diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 12d2868b89e..8cef5978213 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../pom.xml release-scripts diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index d92f5545390..a9aafd6e3d0 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtension.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtension.java index 0337ba209cb..69a7807bb97 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtension.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtension.java @@ -19,8 +19,10 @@ import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.valueRef; import static software.amazon.awssdk.enhanced.dynamodb.internal.update.UpdateExpressionUtils.ifNotExists; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import 
java.util.List; import java.util.Map; import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkPublicApi; @@ -37,12 +39,12 @@ import software.amazon.awssdk.enhanced.dynamodb.update.UpdateExpression; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.utils.CollectionUtils; +import software.amazon.awssdk.utils.Logger; /** - * This extension enables atomic counter attributes to be written to the database. - * The extension is loaded by default when you instantiate a - * {@link DynamoDbEnhancedClient} and only needs to be added to the client if you - * are adding custom extensions to the client. + * This extension enables atomic counter attributes to be changed in DynamoDb by creating instructions for modifying + * an existing value or setting a start value. The extension is loaded by default when you instantiate a + * {@link DynamoDbEnhancedClient} and only needs to be added to the client if you are adding custom extensions to the client. *

    * To utilize atomic counters, first create a field in your model that will be used to store the counter. * This class field should of type {@link Long} and you need to tag it as an atomic counter: @@ -56,8 +58,7 @@ *

    * Every time a new update of the record is successfully written to the database, the counter will be updated automatically. * By default, the counter starts at 0 and increments by 1 for each update. The tags provide the capability of adjusting - * the counter start and increment/decrement values such as described in - * {@link DynamoDbAtomicCounter}. + * the counter start and increment/decrement values such as described in {@link DynamoDbAtomicCounter}. *

    * Example 1: Using a bean based table schema *

    @@ -86,10 +87,18 @@
      * }
      * 
    *

    - * NOTE: When using putItem, the counter will be reset to its start value. + * NOTES: + *

      + *
    • When using putItem, the counter will be reset to its start value.
    • + *
    • The extension will remove any existing occurrences of the atomic counter attributes from the record during an + * updateItem operation. Manually editing attributes marked as atomic counters will have NO EFFECT.
    • + *
    */ @SdkPublicApi public final class AtomicCounterExtension implements DynamoDbEnhancedClientExtension { + + private static final Logger log = Logger.loggerFor(AtomicCounterExtension.class); + private AtomicCounterExtension() { } @@ -118,6 +127,7 @@ public WriteModification beforeWrite(DynamoDbExtensionContext.BeforeWrite contex break; case UPDATE_ITEM: modificationBuilder.updateExpression(createUpdateExpression(counters)); + modificationBuilder.transformedItem(filterFromItem(counters, context.items())); break; default: break; } @@ -136,6 +146,22 @@ private Map addToItem(Map counter return Collections.unmodifiableMap(itemToTransform); } + private Map filterFromItem(Map counters, Map items) { + Map itemToTransform = new HashMap<>(items); + List removedAttributes = new ArrayList<>(); + for (String attributeName : counters.keySet()) { + if (itemToTransform.containsKey(attributeName)) { + itemToTransform.remove(attributeName); + removedAttributes.add(attributeName); + } + } + if (!removedAttributes.isEmpty()) { + log.debug(() -> String.format("Filtered atomic counter attributes from existing update item to avoid collisions: %s", + String.join(",", removedAttributes))); + } + return Collections.unmodifiableMap(itemToTransform); + } + private SetAction counterAction(Map.Entry e) { String attributeName = e.getKey(); AtomicCounter counter = e.getValue(); diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java index 5d1ea52390f..0a9706d1379 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/mapper/annotations/DynamoDbBean.java @@ -67,7 +67,7 @@ * } * * public void setId(String id) { - 
* this.name = id; + * this.id = id; * } * * public Instant getCreatedOn() { diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtensionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtensionTest.java index 4ca347f038b..6ee6cf915d7 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtensionTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/AtomicCounterExtensionTest.java @@ -87,7 +87,10 @@ public void beforeWrite_updateItemOperation_hasCounters_createsUpdateExpression( .operationName(OperationName.UPDATE_ITEM) .operationContext(PRIMARY_CONTEXT).build()); - assertThat(result.transformedItem()).isNull(); + Map transformedItem = result.transformedItem(); + assertThat(transformedItem).isNotNull().hasSize(1); + assertThat(transformedItem).containsEntry("id", AttributeValue.fromS(RECORD_ID)); + assertThat(result.updateExpression()).isNotNull(); List setActions = result.updateExpression().setActions(); @@ -112,11 +115,39 @@ public void beforeWrite_updateItemOperation_noCounters_noChanges() { .tableMetadata(SIMPLE_ITEM_MAPPER.tableMetadata()) .operationName(OperationName.UPDATE_ITEM) .operationContext(PRIMARY_CONTEXT).build()); - assertThat(result.transformedItem()).isNull(); assertThat(result.updateExpression()).isNull(); } + @Test + public void beforeWrite_updateItemOperation_hasCountersInItem_createsUpdateExpressionAndFilters() { + AtomicCounterItem atomicCounterItem = new AtomicCounterItem(); + atomicCounterItem.setId(RECORD_ID); + atomicCounterItem.setCustomCounter(255L); + + Map items = ITEM_MAPPER.itemToMap(atomicCounterItem, true); + assertThat(items).hasSize(2); + + WriteModification result = + atomicCounterExtension.beforeWrite(DefaultDynamoDbExtensionContext.builder() + 
.items(items) + .tableMetadata(ITEM_MAPPER.tableMetadata()) + .operationName(OperationName.UPDATE_ITEM) + .operationContext(PRIMARY_CONTEXT).build()); + + Map transformedItem = result.transformedItem(); + assertThat(transformedItem).isNotNull().hasSize(1); + assertThat(transformedItem).containsEntry("id", AttributeValue.fromS(RECORD_ID)); + + assertThat(result.updateExpression()).isNotNull(); + + List setActions = result.updateExpression().setActions(); + assertThat(setActions).hasSize(2); + + verifyAction(setActions, "customCounter", "5", "5"); + verifyAction(setActions, "defaultCounter", "-1", "1"); + } + @Test public void beforeWrite_putItemOperation_hasCounters_createsItemTransform() { AtomicCounterItem atomicCounterItem = new AtomicCounterItem(); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AtomicCounterTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AtomicCounterTest.java index a07d16a8f5d..9b3d12e6d55 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AtomicCounterTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/functionaltests/AtomicCounterTest.java @@ -112,15 +112,35 @@ public void createViaPut_incrementsCorrectly() { } @Test - public void createViaUpdate_settingCounterInPojo_throwsException() { + public void createViaUpdate_settingCounterInPojo_hasNoEffect() { AtomicCounterRecord record = new AtomicCounterRecord(); record.setId(RECORD_ID); record.setDefaultCounter(10L); record.setAttribute1(STRING_VALUE); - assertThatThrownBy(() -> mappedTable.updateItem(record)) - .isInstanceOf(DynamoDbException.class) - .hasMessageContaining("Two document paths"); + mappedTable.updateItem(record); + AtomicCounterRecord persistedRecord = mappedTable.getItem(record); + 
assertThat(persistedRecord.getAttribute1()).isEqualTo(STRING_VALUE); + assertThat(persistedRecord.getDefaultCounter()).isEqualTo(0L); + assertThat(persistedRecord.getCustomCounter()).isEqualTo(10L); + assertThat(persistedRecord.getDecreasingCounter()).isEqualTo(-20L); + } + + @Test + public void updateItem_retrievedFromDb_shouldNotThrowException() { + AtomicCounterRecord record = new AtomicCounterRecord(); + record.setId(RECORD_ID); + record.setAttribute1(STRING_VALUE); + mappedTable.updateItem(record); + + AtomicCounterRecord retrievedRecord = mappedTable.getItem(record); + retrievedRecord.setAttribute1("ChangingThisAttribute"); + + retrievedRecord = mappedTable.updateItem(retrievedRecord); + assertThat(retrievedRecord).isNotNull(); + assertThat(retrievedRecord.getDefaultCounter()).isEqualTo(1L); + assertThat(retrievedRecord.getCustomCounter()).isEqualTo(15L); + assertThat(retrievedRecord.getDecreasingCounter()).isEqualTo(-21L); } @Test diff --git a/services-custom/iam-policy-builder/pom.xml b/services-custom/iam-policy-builder/pom.xml index 12012995d49..053e624e641 100644 --- a/services-custom/iam-policy-builder/pom.xml +++ b/services-custom/iam-policy-builder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml iam-policy-builder diff --git a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java index 0fd0354a395..fd28e616881 100644 --- a/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java +++ b/services-custom/iam-policy-builder/src/main/java/software/amazon/awssdk/policybuilder/iam/IamPolicy.java @@ -80,8 +80,9 @@ * String policyVersion = getPolicyResponse.defaultVersionId(); * GetPolicyVersionResponse getPolicyVersionResponse = * iam.getPolicyVersion(r -> 
r.policyArn(policyArn).versionId(policyVersion)); - * - * IamPolicy policy = IamPolicy.fromJson(getPolicyVersionResponse.policyVersion().document()); + * + * String decodedPolicy = URLDecoder.decode(getPolicyVersionResponse.policyVersion().document(), StandardCharsets.UTF_8); + * IamPolicy policy = IamPolicy.fromJson(decodedPolicy); * * IamStatement newStatement = policy.statements().get(0).copy(s -> s.addAction("dynamodb:GetItem")); * IamPolicy newPolicy = policy.copy(p -> p.statements(Arrays.asList(newStatement))); diff --git a/services-custom/pom.xml b/services-custom/pom.xml index c8634fbe963..dc50e8c78cf 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services-custom/s3-transfer-manager/pom.xml b/services-custom/s3-transfer-manager/pom.xml index 002a3500039..f4d4cafe64c 100644 --- a/services-custom/s3-transfer-manager/pom.xml +++ b/services-custom/s3-transfer-manager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml s3-transfer-manager diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index 65bcc0c362b..624e7a104f6 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/account/pom.xml b/services/account/pom.xml index ba25a84b29f..71d65a9495d 100644 --- a/services/account/pom.xml +++ b/services/account/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT account AWS Java SDK :: Services :: Account diff --git a/services/acm/pom.xml b/services/acm/pom.xml index a71a52d72f6..fefe113ea6f 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ 
-21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 3253e20cb6f..98b4fe3014f 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/alexaforbusiness/pom.xml b/services/alexaforbusiness/pom.xml index dcae2f09462..48a2e8758f5 100644 --- a/services/alexaforbusiness/pom.xml +++ b/services/alexaforbusiness/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 alexaforbusiness diff --git a/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config b/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config index a07c63b58af..9ef3f846f7a 100644 --- a/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config +++ b/services/alexaforbusiness/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["*"] + "excludedSimpleMethods" : ["*"] } diff --git a/services/amp/pom.xml b/services/amp/pom.xml index 0a90c7e6e5d..361d6682da3 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT amp AWS Java SDK :: Services :: Amp diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 0dbc6c19bfb..e1de969b3e6 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index af367f1aa95..796ce38a362 100644 --- a/services/amplifybackend/pom.xml +++ 
b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT amplifybackend AWS Java SDK :: Services :: Amplify Backend diff --git a/services/amplifyuibuilder/pom.xml b/services/amplifyuibuilder/pom.xml index 46c9847752d..cf11205cdf3 100644 --- a/services/amplifyuibuilder/pom.xml +++ b/services/amplifyuibuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT amplifyuibuilder AWS Java SDK :: Services :: Amplify UI Builder diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index d40d9234c95..67c6c231386 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigateway/src/main/resources/codegen-resources/customization.config b/services/apigateway/src/main/resources/codegen-resources/customization.config index 040a30404bf..9662110a579 100644 --- a/services/apigateway/src/main/resources/codegen-resources/customization.config +++ b/services/apigateway/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "emitAsShape": "String" } }, - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "updateAccount", "createApiKey", "generateClientCertificate" diff --git a/services/apigateway/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/apigateway/src/main/resources/codegen-resources/endpoint-rule-set.json index 825415021ba..61804152356 100644 --- a/services/apigateway/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/apigateway/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -3,7 +3,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -32,13 
+32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,64 +45,17 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "ref": "UseFIPS" }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + true ] } - ] + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" }, { "conditions": [ @@ -111,19 +63,51 @@ "fn": "booleanEquals", "argv": [ { - "ref": "UseFIPS" + "ref": "UseDualStack" }, true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "booleanEquals", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -133,90 +117,109 @@ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - 
} + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apigateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://apigateway-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -229,78 +232,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", 
+ "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apigateway.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://apigateway.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://apigateway.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://apigateway.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/apigateway/src/main/resources/codegen-resources/endpoint-tests.json b/services/apigateway/src/main/resources/codegen-resources/endpoint-tests.json index 0256617c6b8..3d5b86aed1f 
100644 --- a/services/apigateway/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/apigateway/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,1136 +1,18 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://apigateway.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-south-2.amazonaws.com" - } - }, - "params": { - 
"UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-south-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-south-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-gov-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": 
"me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ca-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": 
false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true - } - }, - { - "documentation": 
"For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - 
"endpoint": { - "url": "https://apigateway.us-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.us-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.af-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.af-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-north-1.amazonaws.com" - } - }, - 
"params": { - "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-north-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - 
"documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack 
disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - 
"expect": { - "endpoint": { - "url": "https://apigateway.me-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.me-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.sa-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.sa-east-1.amazonaws.com" + "url": "https://apigateway.af-south-1.amazonaws.com" } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true - } - }, - { - "documentation": "For region 
ap-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "Region": "ap-east-1", "UseDualStack": false } }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.ap-east-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true - } - }, { "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -1139,535 +21,483 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-east-1", - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": false, - "Region": "cn-north-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://apigateway-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-gov-west-1.amazonaws.com" + "url": "https://apigateway.ap-northeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-gov-west-1", + "Region": "ap-northeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-gov-west-1.api.aws" + "url": "https://apigateway.ap-northeast-2.amazonaws.com" } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-gov-west-1.amazonaws.com" + "url": "https://apigateway.ap-northeast-3.amazonaws.com" } }, "params": { + "Region": "ap-northeast-3", "UseFIPS": false, - "Region": "us-gov-west-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-1.api.aws" + "url": "https://apigateway.ap-south-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": 
false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-1.amazonaws.com" + "url": "https://apigateway.ap-southeast-1.amazonaws.com" } }, "params": { - "UseFIPS": true, "Region": "ap-southeast-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-1.api.aws" + "url": "https://apigateway.ap-southeast-2.amazonaws.com" } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-1.amazonaws.com" + "url": "https://apigateway.ca-central-1.amazonaws.com" } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "Region": "ap-southeast-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-2.api.aws" + "url": "https://apigateway.eu-central-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled 
and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-2.amazonaws.com" + "url": "https://apigateway.eu-north-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-2", + "Region": "eu-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-2.api.aws" + "url": "https://apigateway.eu-south-1.amazonaws.com" } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-2.amazonaws.com" + "url": "https://apigateway.eu-west-1.amazonaws.com" } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "Region": "ap-southeast-2", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://apigateway.eu-west-2.amazonaws.com" + } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-iso-east-1.c2s.ic.gov" + "url": 
"https://apigateway.eu-west-3.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "us-iso-east-1", + "Region": "eu-west-3", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://apigateway.me-south-1.amazonaws.com" + } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-iso-east-1.c2s.ic.gov" + "url": "https://apigateway.sa-east-1.amazonaws.com" } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-3.api.aws" + "url": "https://apigateway.us-east-1.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-3.amazonaws.com" + "url": "https://apigateway.us-east-2.amazonaws.com" } }, "params": { - "UseFIPS": true, - "Region": "ap-southeast-3", + "Region": "us-east-2", + "UseFIPS": false, 
"UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-3.api.aws" + "url": "https://apigateway.us-west-1.amazonaws.com" } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-3.amazonaws.com" + "url": "https://apigateway.us-west-2.amazonaws.com" } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "Region": "ap-southeast-3", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-4.api.aws" + "url": "https://apigateway-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.ap-southeast-4.amazonaws.com" + "url": "https://apigateway-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "ap-southeast-4", "UseDualStack": false } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://apigateway.ap-southeast-4.api.aws" + "url": "https://apigateway.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "ap-southeast-4", "UseDualStack": true } }, { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apigateway.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.ap-southeast-4.amazonaws.com" + "url": "https://apigateway.cn-northwest-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-northwest-1", "UseFIPS": false, - "Region": "ap-southeast-4", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-east-1.api.aws" + "url": "https://apigateway-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-east-1.amazonaws.com" + "url": "https://apigateway-fips.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": 
{ - "url": "https://apigateway.us-east-1.api.aws" + "url": "https://apigateway.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-east-1.amazonaws.com" + "url": "https://apigateway.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apigateway.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-east-2.api.aws" + "url": "https://apigateway-fips.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.us-east-2.amazonaws.com" + "url": "https://apigateway-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { 
"endpoint": { - "url": "https://apigateway.us-east-2.api.aws" + "url": "https://apigateway.us-gov-east-1.api.aws" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway.us-east-2.amazonaws.com" + "url": "https://apigateway.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-2", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://apigateway-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://apigateway-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "cn-northwest-1", "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://apigateway.cn-northwest-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": 
"us-iso-east-1", "UseFIPS": false, - "Region": "cn-northwest-1", "UseDualStack": true } }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://apigateway.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": false - } - }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -1679,8 +509,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -1690,8 +520,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -1703,21 +533,34 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -1728,8 +571,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, 
"UseDualStack": false, "Endpoint": "https://example.com" } @@ -1740,11 +583,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/apigateway/src/main/resources/codegen-resources/service-2.json b/services/apigateway/src/main/resources/codegen-resources/service-2.json index 70ba1fdc606..eb167860d6a 100644 --- a/services/apigateway/src/main/resources/codegen-resources/service-2.json +++ b/services/apigateway/src/main/resources/codegen-resources/service-2.json @@ -2138,7 +2138,7 @@ }, "customerId":{ "shape":"String", - "documentation":"

    An AWS Marketplace customer identifier , when integrating with the AWS SaaS Marketplace.

    " + "documentation":"

    An Amazon Web Services Marketplace customer identifier, when integrating with the Amazon Web Services SaaS Marketplace.

    " }, "description":{ "shape":"String", @@ -2475,7 +2475,7 @@ }, "customerId":{ "shape":"String", - "documentation":"

    An AWS Marketplace customer identifier , when integrating with the AWS SaaS Marketplace.

    " + "documentation":"

    An Amazon Web Services Marketplace customer identifier, when integrating with the Amazon Web Services SaaS Marketplace.

    " }, "tags":{ "shape":"MapOfStringToString", @@ -2689,7 +2689,7 @@ }, "certificateArn":{ "shape":"String", - "documentation":"

    The reference to an AWS-managed certificate that will be used by edge-optimized endpoint for this domain name. AWS Certificate Manager is the only supported source.

    " + "documentation":"

    The reference to an Amazon Web Services-managed certificate that will be used by edge-optimized endpoint for this domain name. Certificate Manager is the only supported source.

    " }, "regionalCertificateName":{ "shape":"String", @@ -2697,7 +2697,7 @@ }, "regionalCertificateArn":{ "shape":"String", - "documentation":"

    The reference to an AWS-managed certificate that will be used by regional endpoint for this domain name. AWS Certificate Manager is the only supported source.

    " + "documentation":"

    The reference to an Amazon Web Services-managed certificate that will be used by regional endpoint for this domain name. Certificate Manager is the only supported source.

    " }, "endpointConfiguration":{ "shape":"EndpointConfiguration", @@ -2834,7 +2834,7 @@ }, "apiKeySource":{ "shape":"ApiKeySourceType", - "documentation":"

    The source of the API key for metering requests according to a usage plan. Valid values are: >HEADER to read the API key from the X-API-Key header of a request. AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.

    " + "documentation":"

    The source of the API key for metering requests according to a usage plan. Valid values are: HEADER to read the API key from the X-API-Key header of a request. AUTHORIZER to read the API key from the UsageIdentifierKey from a custom authorizer.

    " }, "endpointConfiguration":{ "shape":"EndpointConfiguration", @@ -2985,7 +2985,7 @@ }, "targetArns":{ "shape":"ListOfString", - "documentation":"

    The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same AWS account of the API owner.

    " + "documentation":"

    The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same Amazon Web Services account of the API owner.

    " }, "tags":{ "shape":"MapOfStringToString", @@ -3631,7 +3631,7 @@ }, "certificateArn":{ "shape":"String", - "documentation":"

    The reference to an AWS-managed certificate that will be used by edge-optimized endpoint for this domain name. AWS Certificate Manager is the only supported source.

    " + "documentation":"

    The reference to an Amazon Web Services-managed certificate that will be used by edge-optimized endpoint for this domain name. Certificate Manager is the only supported source.

    " }, "certificateUploadDate":{ "shape":"Timestamp", @@ -3651,7 +3651,7 @@ }, "regionalCertificateArn":{ "shape":"String", - "documentation":"

    The reference to an AWS-managed certificate that will be used for validating the regional domain name. AWS Certificate Manager is the only supported source.

    " + "documentation":"

    The reference to an Amazon Web Services-managed certificate that will be used for validating the regional domain name. Certificate Manager is the only supported source.

    " }, "distributionDomainName":{ "shape":"String", @@ -3931,7 +3931,7 @@ }, "customerId":{ "shape":"String", - "documentation":"

    The identifier of a customer in AWS Marketplace or an external system, such as a developer portal.

    ", + "documentation":"

    The identifier of a customer in Amazon Web Services Marketplace or an external system, such as a developer portal.

    ", "location":"querystring", "locationName":"customerId" }, @@ -5041,7 +5041,7 @@ }, "mode":{ "shape":"PutMode", - "documentation":"

    A query parameter to indicate whether to overwrite (OVERWRITE) any existing DocumentationParts definition or to merge (MERGE) the new definition into the existing one. The default value is MERGE.

    ", + "documentation":"

    A query parameter to indicate whether to overwrite (overwrite) any existing DocumentationParts definition or to merge (merge) the new definition into the existing one. The default value is merge.

    ", "location":"querystring", "locationName":"mode" }, @@ -5071,7 +5071,7 @@ }, "parameters":{ "shape":"MapOfStringToString", - "documentation":"

    A key-value map of context-specific query string parameters specifying the behavior of different API importing operations. The following shows operation-specific parameters and their supported values.

    To exclude DocumentationParts from the import, set parameters as ignore=documentation.

    To configure the endpoint type, set parameters as endpointConfigurationTypes=EDGE, endpointConfigurationTypes=REGIONAL, or endpointConfigurationTypes=PRIVATE. The default endpoint type is EDGE.

    To handle imported basepath, set parameters as basepath=ignore, basepath=prepend or basepath=split.

    For example, the AWS CLI command to exclude documentation from the imported API is:

    The AWS CLI command to set the regional endpoint on the imported API is:

    ", + "documentation":"

    A key-value map of context-specific query string parameters specifying the behavior of different API importing operations. The following shows operation-specific parameters and their supported values.

    To exclude DocumentationParts from the import, set parameters as ignore=documentation.

    To configure the endpoint type, set parameters as endpointConfigurationTypes=EDGE, endpointConfigurationTypes=REGIONAL, or endpointConfigurationTypes=PRIVATE. The default endpoint type is EDGE.

    To handle imported basepath, set parameters as basepath=ignore, basepath=prepend or basepath=split.

    ", "location":"querystring" }, "body":{ @@ -5092,11 +5092,11 @@ }, "httpMethod":{ "shape":"String", - "documentation":"

    Specifies the integration's HTTP method type.

    " + "documentation":"

    Specifies the integration's HTTP method type. For the Type property, if you specify MOCK, this property is optional. For Lambda integrations, you must set the integration method to POST. For all other types, you must specify this property.

    " }, "uri":{ "shape":"String", - "documentation":"

    Specifies Uniform Resource Identifier (URI) of the integration endpoint.

    For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification, for either standard integration, where connectionType is not VPC_LINK, or private integration, where connectionType is VPC_LINK. For a private HTTP integration, the URI is not used for routing. For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated Amazon Web Services service (e.g., s3); and {subdomain} is a designated subdomain supported by certain Amazon Web Services service for fast host-name lookup. action can be used for an Amazon Web Services service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an AWS service path-based API. The ensuing service_api refers to the path to an Amazon Web Services service resource, including the region of the integrated Amazon Web Services service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

    " + "documentation":"

    Specifies Uniform Resource Identifier (URI) of the integration endpoint.

    For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) URL according to the RFC-3986 specification for standard integrations. If connectionType is VPC_LINK specify the Network Load Balancer DNS name. For AWS or AWS_PROXY integrations, the URI is of the form arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the name of the integrated Amazon Web Services service (e.g., s3); and {subdomain} is a designated subdomain supported by certain Amazon Web Services service for fast host-name lookup. action can be used for an Amazon Web Services service action-based API, using an Action={name}&{p1}={v1}&p2={v2}... query string. The ensuing {service_api} refers to a supported action {name} plus any required input parameters. Alternatively, path can be used for an Amazon Web Services service path-based API. The ensuing service_api refers to the path to an Amazon Web Services service resource, including the region of the integrated Amazon Web Services service, if applicable. For example, for integration with the S3 API of GetObject, the uri can be either arn:aws:apigateway:us-west-2:s3:action/GetObject&Bucket={bucket}&Key={key} or arn:aws:apigateway:us-west-2:s3:path/{bucket}/{key}

    " }, "connectionType":{ "shape":"ConnectionType", @@ -5108,7 +5108,7 @@ }, "credentials":{ "shape":"String", - "documentation":"

    Specifies the credentials required for the integration, if any. For AWS integrations, three options are available. To specify an IAM Role for API Gateway to assume, use the role's Amazon Resource Name (ARN). To require that the caller's identity be passed through from the request, specify the string arn:aws:iam::\\*:user/\\*. To use resource-based permissions on supported AWS services, specify null.

    " + "documentation":"

    Specifies the credentials required for the integration, if any. For AWS integrations, three options are available. To specify an IAM Role for API Gateway to assume, use the role's Amazon Resource Name (ARN). To require that the caller's identity be passed through from the request, specify the string arn:aws:iam::\\*:user/\\*. To use resource-based permissions on supported Amazon Web Services services, specify null.

    " }, "requestParameters":{ "shape":"MapOfStringToString", @@ -5147,7 +5147,7 @@ "documentation":"

    Specifies the TLS configuration for an integration.

    " } }, - "documentation":"

    Represents an HTTP, HTTP_PROXY, AWS, AWS_PROXY, or Mock integration.

    " + "documentation":"

    Represents an HTTP, HTTP_PROXY, AWS, AWS_PROXY, or Mock integration.

    " }, "IntegrationResponse":{ "type":"structure", @@ -5158,7 +5158,7 @@ }, "selectionPattern":{ "shape":"String", - "documentation":"

    Specifies the regular expression (regex) pattern used to choose an integration response based on the response from the back end. For example, if the success response returns nothing and the error response returns some string, you could use the .+ regex to match error response. However, make sure that the error response does not contain any newline (\\n) character in such cases. If the back end is an AWS Lambda function, the AWS Lambda function error header is matched. For all other HTTP and AWS back ends, the HTTP status code is matched.

    " + "documentation":"

    Specifies the regular expression (regex) pattern used to choose an integration response based on the response from the back end. For example, if the success response returns nothing and the error response returns some string, you could use the .+ regex to match error response. However, make sure that the error response does not contain any newline (\\n) character in such cases. If the back end is an Lambda function, the Lambda function error header is matched. For all other HTTP and Amazon Web Services back ends, the HTTP status code is matched.

    " }, "responseParameters":{ "shape":"MapOfStringToString", @@ -5177,7 +5177,7 @@ }, "IntegrationType":{ "type":"string", - "documentation":"

    The integration type. The valid value is HTTP for integrating an API method with an HTTP backend; AWS with any AWS service endpoints; MOCK for testing without actually invoking the backend; HTTP_PROXY for integrating with the HTTP proxy integration; AWS_PROXY for integrating with the Lambda proxy integration.

    ", + "documentation":"

    The integration type. The valid value is HTTP for integrating an API method with an HTTP backend; AWS with any Amazon Web Services service endpoints; MOCK for testing without actually invoking the backend; HTTP_PROXY for integrating with the HTTP proxy integration; AWS_PROXY for integrating with the Lambda proxy integration.

    ", "enum":[ "HTTP", "AWS", @@ -5440,43 +5440,43 @@ "members":{ "metricsEnabled":{ "shape":"Boolean", - "documentation":"

    Specifies whether Amazon CloudWatch metrics are enabled for this method. The PATCH path for this setting is /{method_setting_key}/metrics/enabled, and the value is a Boolean.

    " + "documentation":"

    Specifies whether Amazon CloudWatch metrics are enabled for this method.

    " }, "loggingLevel":{ "shape":"String", - "documentation":"

    Specifies the logging level for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, and the available levels are OFF, ERROR, and INFO. Choose ERROR to write only error-level entries to CloudWatch Logs, or choose INFO to include all ERROR events as well as extra informational events.

    " + "documentation":"

    Specifies the logging level for this method, which affects the log entries pushed to Amazon CloudWatch Logs. Valid values are OFF, ERROR, and INFO. Choose ERROR to write only error-level entries to CloudWatch Logs, or choose INFO to include all ERROR events as well as extra informational events.

    " }, "dataTraceEnabled":{ "shape":"Boolean", - "documentation":"

    Specifies whether data trace logging is enabled for this method, which affects the log entries pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/dataTrace, and the value is a Boolean.

    " + "documentation":"

    Specifies whether data trace logging is enabled for this method, which affects the log entries pushed to Amazon CloudWatch Logs.

    " }, "throttlingBurstLimit":{ "shape":"Integer", - "documentation":"

    Specifies the throttling burst limit. The PATCH path for this setting is /{method_setting_key}/throttling/burstLimit, and the value is an integer.

    " + "documentation":"

    Specifies the throttling burst limit.

    " }, "throttlingRateLimit":{ "shape":"Double", - "documentation":"

    Specifies the throttling rate limit. The PATCH path for this setting is /{method_setting_key}/throttling/rateLimit, and the value is a double.

    " + "documentation":"

    Specifies the throttling rate limit.

    " }, "cachingEnabled":{ "shape":"Boolean", - "documentation":"

    Specifies whether responses should be cached and returned for requests. A cache cluster must be enabled on the stage for responses to be cached. The PATCH path for this setting is /{method_setting_key}/caching/enabled, and the value is a Boolean.

    " + "documentation":"

    Specifies whether responses should be cached and returned for requests. A cache cluster must be enabled on the stage for responses to be cached.

    " }, "cacheTtlInSeconds":{ "shape":"Integer", - "documentation":"

    Specifies the time to live (TTL), in seconds, for cached responses. The higher the TTL, the longer the response will be cached. The PATCH path for this setting is /{method_setting_key}/caching/ttlInSeconds, and the value is an integer.

    " + "documentation":"

    Specifies the time to live (TTL), in seconds, for cached responses. The higher the TTL, the longer the response will be cached.

    " }, "cacheDataEncrypted":{ "shape":"Boolean", - "documentation":"

    Specifies whether the cached responses are encrypted. The PATCH path for this setting is /{method_setting_key}/caching/dataEncrypted, and the value is a Boolean.

    " + "documentation":"

    Specifies whether the cached responses are encrypted.

    " }, "requireAuthorizationForCacheControl":{ "shape":"Boolean", - "documentation":"

    Specifies whether authorization is required for a cache invalidation request. The PATCH path for this setting is /{method_setting_key}/caching/requireAuthorizationForCacheControl, and the value is a Boolean.

    " + "documentation":"

    Specifies whether authorization is required for a cache invalidation request.

    " }, "unauthorizedCacheControlHeaderStrategy":{ "shape":"UnauthorizedCacheControlHeaderStrategy", - "documentation":"

    Specifies how to handle unauthorized requests for cache invalidation. The PATCH path for this setting is /{method_setting_key}/caching/unauthorizedCacheControlHeaderStrategy, and the available values are FAIL_WITH_403, SUCCEED_WITH_RESPONSE_HEADER, SUCCEED_WITHOUT_RESPONSE_HEADER.

    " + "documentation":"

    Specifies how to handle unauthorized requests for cache invalidation.

    " } }, "documentation":"

    Specifies the method setting properties.

    " @@ -6088,6 +6088,10 @@ "disableExecuteApiEndpoint":{ "shape":"Boolean", "documentation":"

    Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint.

    " + }, + "rootResourceId":{ + "shape":"String", + "documentation":"

    The API's root resource ID.

    " } }, "documentation":"

    Represents a REST API.

    " @@ -6412,7 +6416,7 @@ }, "latency":{ "shape":"Long", - "documentation":"

    The execution latency of the test authorizer request.

    " + "documentation":"

    The execution latency, in ms, of the test authorizer request.

    " }, "principalId":{ "shape":"String", @@ -6511,7 +6515,7 @@ }, "latency":{ "shape":"Long", - "documentation":"

    The execution latency of the test invoke request.

    " + "documentation":"

    The execution latency, in ms, of the test invoke request.

    " } }, "documentation":"

    Represents the response of the test invoke request in the HTTP method.

    " @@ -6751,7 +6755,7 @@ "members":{ "restApiId":{ "shape":"String", - "documentation":"

    The string identifier of the associated RestApi..

    ", + "documentation":"

    The string identifier of the associated RestApi.

    ", "location":"uri", "locationName":"restapi_id" }, @@ -7191,7 +7195,7 @@ }, "productCode":{ "shape":"String", - "documentation":"

    The AWS Markeplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.

    " + "documentation":"

    The Amazon Web Services Marketplace product identifier to associate with the usage plan as a SaaS product on the Amazon Web Services Marketplace.

    " }, "tags":{ "shape":"MapOfStringToString", @@ -7263,7 +7267,7 @@ }, "targetArns":{ "shape":"ListOfString", - "documentation":"

    The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same AWS account of the API owner.

    " + "documentation":"

    The ARN of the network load balancer of the VPC targeted by the VPC link. The network load balancer must be owned by the same Amazon Web Services account of the API owner.

    " }, "status":{ "shape":"VpcLinkStatus", @@ -7302,5 +7306,5 @@ "documentation":"

    The collection of VPC links under the caller's account in a region.

    " } }, - "documentation":"Amazon API Gateway

    Amazon API Gateway helps developers deliver robust, secure, and scalable mobile and web application back ends. API Gateway allows developers to securely connect mobile and web applications to APIs that run on AWS Lambda, Amazon EC2, or other publicly addressable web services that are hosted outside of AWS.

    " + "documentation":"Amazon API Gateway

    Amazon API Gateway helps developers deliver robust, secure, and scalable mobile and web application back ends. API Gateway allows developers to securely connect mobile and web applications to APIs that run on Lambda, Amazon EC2, or other publicly addressable web services that are hosted outside of AWS.

    " } diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index 608878a4083..a1ba2da1548 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 68937fc6282..4aa2b699f82 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index 05f5db83bc2..79814b0a706 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appconfigdata/pom.xml b/services/appconfigdata/pom.xml index 81d7c63e533..93fbf164e3b 100644 --- a/services/appconfigdata/pom.xml +++ b/services/appconfigdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT appconfigdata AWS Java SDK :: Services :: App Config Data diff --git a/services/appfabric/pom.xml b/services/appfabric/pom.xml index 8238986e198..510ad7e0851 100644 --- a/services/appfabric/pom.xml +++ b/services/appfabric/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT appfabric AWS Java SDK :: Services :: App Fabric diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index e7969fad583..8a161697df5 100644 --- a/services/appflow/pom.xml +++ b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff 
--git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index feed4b1f497..3c0e220a787 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index 902912c3295..bf9ffb8b81f 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationcostprofiler/pom.xml b/services/applicationcostprofiler/pom.xml index c235229a287..66c983e883b 100644 --- a/services/applicationcostprofiler/pom.xml +++ b/services/applicationcostprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT applicationcostprofiler AWS Java SDK :: Services :: Application Cost Profiler diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index 0f37c85e99b..ca616d66715 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config b/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config index 4864947dd29..d46992d3cb0 100644 --- a/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config @@ -9,7 +9,7 @@ "describeExportConfigurations", 
"getDiscoverySummary" ], - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "startContinuousExport", "describeContinuousExports" ], diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 95f7a902d9c..6abc76a6a02 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index e65154687d1..0aa95640d84 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/apprunner/pom.xml b/services/apprunner/pom.xml index a4fc93a3c02..94ca457b68c 100644 --- a/services/apprunner/pom.xml +++ b/services/apprunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT apprunner AWS Java SDK :: Services :: App Runner diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index 89cabb2780d..bfe07353160 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appstream/src/main/resources/codegen-resources/customization.config b/services/appstream/src/main/resources/codegen-resources/customization.config index d6ce3dce1a2..b605d120b9a 100644 --- a/services/appstream/src/main/resources/codegen-resources/customization.config +++ b/services/appstream/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "updateFleet", "describeUserStackAssociations" ], diff --git a/services/appsync/pom.xml 
b/services/appsync/pom.xml index bd59a5f2f4e..0ef2f11a355 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT appsync diff --git a/services/arczonalshift/pom.xml b/services/arczonalshift/pom.xml index f34ff49ae46..43d35bd2c57 100644 --- a/services/arczonalshift/pom.xml +++ b/services/arczonalshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT arczonalshift AWS Java SDK :: Services :: ARC Zonal Shift diff --git a/services/athena/pom.xml b/services/athena/pom.xml index e33cbf0286f..359b6b4fc4b 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index ed2071e60a3..73bf7d876e7 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index da669963b87..c9e98a84fbd 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index 03292056a06..b9a77af87c3 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 17f10c5b37b..1fb054b83a3 100644 --- 
a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backup/src/main/resources/codegen-resources/customization.config b/services/backup/src/main/resources/codegen-resources/customization.config index 6dc94898135..1cb199bb471 100644 --- a/services/backup/src/main/resources/codegen-resources/customization.config +++ b/services/backup/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "getSupportedResourceTypes" ], "verifiedSimpleMethods" : [ diff --git a/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json index 49fbf7acba9..dc2e1fc92d9 100644 --- a/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/backup/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ 
{ "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://backup-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://backup-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": 
"booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://backup-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://backup-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://backup.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" 
] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://backup.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://backup.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://backup.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/backup/src/main/resources/codegen-resources/service-2.json b/services/backup/src/main/resources/codegen-resources/service-2.json index 3e72248e09a..379cbe80b72 100644 --- a/services/backup/src/main/resources/codegen-resources/service-2.json +++ b/services/backup/src/main/resources/codegen-resources/service-2.json @@ -135,7 +135,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    This request creates a logical container where backups are stored.

    This request includes a name, optionally one or more resource tags, an encryption key, and a request ID.

    Do not include sensitive data, such as passport numbers, in the name of a backup vault.

    ", + "documentation":"

    This request creates a logical container to where backups may be copied.

    This request includes a name, the Region, the maximum number of retention days, the minimum number of retention days, and optionally can include tags and a creator request ID.

    Do not include sensitive data, such as passport numbers, in the name of a backup vault.

    ", "idempotent":true }, "CreateReportPlan":{ @@ -1580,6 +1580,10 @@ "EnableContinuousBackup":{ "shape":"Boolean", "documentation":"

    Specifies whether Backup creates continuous backups. True causes Backup to create continuous backups capable of point-in-time restore (PITR). False (or not specified) causes Backup to create snapshot backups.

    " + }, + "ScheduleExpressionTimezone":{ + "shape":"Timezone", + "documentation":"

    This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.

    " } }, "documentation":"

    Specifies a scheduled task used to back up a selection of resources.

    " @@ -1626,6 +1630,10 @@ "EnableContinuousBackup":{ "shape":"Boolean", "documentation":"

    Specifies whether Backup creates continuous backups. True causes Backup to create continuous backups capable of point-in-time restore (PITR). False (or not specified) causes Backup to create snapshot backups.

    " + }, + "ScheduleExpressionTimezone":{ + "shape":"Timezone", + "documentation":"

    This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.

    " } }, "documentation":"

    Specifies a scheduled task used to back up a selection of resources.

    " @@ -3279,7 +3287,7 @@ }, "ControlScope":{ "shape":"ControlScope", - "documentation":"

    The scope of a control. The control scope defines what the control will evaluate. Three examples of control scopes are: a specific backup plan, all backup plans with a specific tag, or all backup plans. For more information, see ControlScope.

    " + "documentation":"

    The scope of a control. The control scope defines what the control will evaluate. Three examples of control scopes are: a specific backup plan, all backup plans with a specific tag, or all backup plans.

    " } }, "documentation":"

    Contains detailed information about all of the controls of a framework. Each framework must contain at least one control.

    " @@ -5486,6 +5494,7 @@ "value":{"shape":"TagValue"}, "sensitive":true }, + "Timezone":{"type":"string"}, "UntagResourceInput":{ "type":"structure", "required":[ diff --git a/services/backupgateway/pom.xml b/services/backupgateway/pom.xml index cbae64ce942..66c226e0502 100644 --- a/services/backupgateway/pom.xml +++ b/services/backupgateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT backupgateway AWS Java SDK :: Services :: Backup Gateway diff --git a/services/backupstorage/pom.xml b/services/backupstorage/pom.xml index d1ea59048cd..d6975aca4a1 100644 --- a/services/backupstorage/pom.xml +++ b/services/backupstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT backupstorage AWS Java SDK :: Services :: Backup Storage diff --git a/services/batch/pom.xml b/services/batch/pom.xml index 5fea8f4a9f9..0d59a196a3c 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/batch/src/main/resources/codegen-resources/customization.config b/services/batch/src/main/resources/codegen-resources/customization.config index c597ff8c197..ef427d6052c 100644 --- a/services/batch/src/main/resources/codegen-resources/customization.config +++ b/services/batch/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "describeJobDefinitions", "describeJobQueues" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "listJobs" ] } diff --git a/services/billingconductor/pom.xml b/services/billingconductor/pom.xml index 07e1650b12a..456c0475274 100644 --- a/services/billingconductor/pom.xml +++ b/services/billingconductor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT billingconductor AWS Java SDK :: Services :: Billingconductor diff --git 
a/services/braket/pom.xml b/services/braket/pom.xml index a6f0a8a5239..357fae318bd 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 1d242abad21..9f988975280 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/chime/pom.xml b/services/chime/pom.xml index 07fbf715749..c261be32761 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chimesdkidentity/pom.xml b/services/chimesdkidentity/pom.xml index 69830834d09..5167eba8c83 100644 --- a/services/chimesdkidentity/pom.xml +++ b/services/chimesdkidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT chimesdkidentity AWS Java SDK :: Services :: Chime SDK Identity diff --git a/services/chimesdkmediapipelines/pom.xml b/services/chimesdkmediapipelines/pom.xml index 7f2dc6752c3..7b24c569553 100644 --- a/services/chimesdkmediapipelines/pom.xml +++ b/services/chimesdkmediapipelines/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT chimesdkmediapipelines AWS Java SDK :: Services :: Chime SDK Media Pipelines diff --git a/services/chimesdkmeetings/pom.xml b/services/chimesdkmeetings/pom.xml index 2a490374a84..ad7fb8e156d 100644 --- a/services/chimesdkmeetings/pom.xml +++ b/services/chimesdkmeetings/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT chimesdkmeetings AWS Java SDK :: Services :: Chime SDK Meetings diff --git 
a/services/chimesdkmeetings/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/chimesdkmeetings/src/main/resources/codegen-resources/endpoint-rule-set.json index ba8bb30e19c..b1a80964628 100644 --- a/services/chimesdkmeetings/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/chimesdkmeetings/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { 
"fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://meetings-chime-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://meetings-chime-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://meetings-chime-fips.{Region}.{PartitionResult#dnsSuffix}", - 
"properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://meetings-chime-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://meetings-chime.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://meetings-chime.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://meetings-chime.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": 
{ + "url": "https://meetings-chime.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/chimesdkmeetings/src/main/resources/codegen-resources/service-2.json b/services/chimesdkmeetings/src/main/resources/codegen-resources/service-2.json index de440d20ddc..ef969e22418 100644 --- a/services/chimesdkmeetings/src/main/resources/codegen-resources/service-2.json +++ b/services/chimesdkmeetings/src/main/resources/codegen-resources/service-2.json @@ -46,7 +46,9 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Updates AttendeeCapabilities except the capabilities listed in an ExcludedAttendeeIds table.

    You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see .

    When using capabilities, be aware of these corner cases:

    • You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive.

    • When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants.

    • When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.

    " }, @@ -81,6 +83,7 @@ "output":{"shape":"CreateMeetingResponse"}, "errors":[ {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, {"shape":"ForbiddenException"}, {"shape":"UnauthorizedException"}, {"shape":"ThrottlingException"}, @@ -100,6 +103,7 @@ "output":{"shape":"CreateMeetingWithAttendeesResponse"}, "errors":[ {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, {"shape":"ForbiddenException"}, {"shape":"UnauthorizedException"}, {"shape":"ThrottlingException"}, @@ -215,6 +219,13 @@ "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], "documentation":"

    Returns a list of the tags available for the specified resource.

    " @@ -271,6 +282,12 @@ "output":{"shape":"TagResourceResponse"}, "errors":[ {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"TooManyTagsException"} ], @@ -287,6 +304,12 @@ "output":{"shape":"UntagResourceResponse"}, "errors":[ {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], "documentation":"

    Removes the specified tags from the specified resources. When you specify a tag key, the action removes both that key and its associated value. The operation succeeds even if you attempt to remove tags from a resource that were already removed. Note the following:

    • To remove tags from a resource, you need the necessary permissions for the service that the resource belongs to as well as permissions for removing tags. For more information, see the documentation for the service whose resource you want to untag.

    • You can only tag resources that are located in the specified AWS Region for the calling AWS account.

    Minimum permissions

    In addition to the tag:UntagResources permission required by this operation, you must also have the remove tags permission defined by the service that created the resource. For example, to remove the tags from an Amazon EC2 instance using the UntagResources operation, you must have both of the following permissions:

    tag:UntagResource

    ChimeSDKMeetings:DeleteTags

    " @@ -305,7 +328,9 @@ {"shape":"UnauthorizedException"}, {"shape":"NotFoundException"}, {"shape":"ForbiddenException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    The capabilities that you want to update.

    You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see .

    When using capabilities, be aware of these corner cases:

    • You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive.

    • When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants.

    • When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.

    " } diff --git a/services/chimesdkmessaging/pom.xml b/services/chimesdkmessaging/pom.xml index f4558557dfc..0f1f0e927b0 100644 --- a/services/chimesdkmessaging/pom.xml +++ b/services/chimesdkmessaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT chimesdkmessaging AWS Java SDK :: Services :: Chime SDK Messaging diff --git a/services/chimesdkvoice/pom.xml b/services/chimesdkvoice/pom.xml index f006831979b..36873e01932 100644 --- a/services/chimesdkvoice/pom.xml +++ b/services/chimesdkvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT chimesdkvoice AWS Java SDK :: Services :: Chime SDK Voice diff --git a/services/cleanrooms/pom.xml b/services/cleanrooms/pom.xml index 18dafc6f301..b80c0dde885 100644 --- a/services/cleanrooms/pom.xml +++ b/services/cleanrooms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cleanrooms AWS Java SDK :: Services :: Clean Rooms diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index a777b374e60..da197befb36 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 cloud9 diff --git a/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json index fbaea21794b..26763c88c97 100644 --- a/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloud9/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not 
supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloud9-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": 
"endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://cloud9-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cloud9-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://cloud9-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { 
- "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cloud9.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://cloud9.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cloud9.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://cloud9.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cloudcontrol/pom.xml b/services/cloudcontrol/pom.xml index c6f585ad8ac..560cd3feaaf 100644 --- a/services/cloudcontrol/pom.xml +++ b/services/cloudcontrol/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudcontrol AWS Java SDK :: Services :: Cloud Control diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index 
595227af12e..ef1966fe4cb 100644 --- a/services/clouddirectory/pom.xml +++ b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 3b4121c4d65..d4552c3b672 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/customization.config b/services/cloudformation/src/main/resources/codegen-resources/customization.config index a88012d0b42..e7b4b0ab5bf 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/customization.config +++ b/services/cloudformation/src/main/resources/codegen-resources/customization.config @@ -17,7 +17,7 @@ ] } }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "estimateTemplateCost", "validateTemplate", "getTemplate", diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index 18d3adcb72b..f3fca6d4808 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index 994b7e24db3..2685523b522 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsm/src/main/resources/codegen-resources/customization.config b/services/cloudhsm/src/main/resources/codegen-resources/customization.config index df5ba497398..7477c0a69fc 100644 --- 
a/services/cloudhsm/src/main/resources/codegen-resources/customization.config +++ b/services/cloudhsm/src/main/resources/codegen-resources/customization.config @@ -14,7 +14,7 @@ "exclude": [ "retryable" ] } }, - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "describeHsm", "describeLunaClient" ], diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index 14b47fac8ee..53b5d21cb38 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index 3935738f819..71247041c13 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index 95d98190035..328b2a033de 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch Domain diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index b1c5cfaaab3..10807f16400 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json index 60f77595e8a..de051391941 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json @@ -158,7 +158,8 @@ 
{"shape":"OrganizationNotInAllFeaturesModeException"}, {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"CloudTrailInvalidClientTokenIdException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Creates a trail that specifies the settings for delivery of log data to an Amazon S3 bucket.

    ", "idempotent":true @@ -235,6 +236,7 @@ {"shape":"InvalidTrailNameException"}, {"shape":"CloudTrailARNInvalidException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidHomeRegionException"}, {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, @@ -393,7 +395,8 @@ {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, {"shape":"InsightNotEnabledException"}, - {"shape":"NoManagementAccountSLRExistsException"} + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Describes the settings for the Insights event selectors that you configured for your trail. GetInsightSelectors shows if CloudTrail Insights event logging is enabled on the trail, and if it is, which insight types are enabled. If you run GetInsightSelectors on a trail that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException

    For more information, see Logging CloudTrail Insights Events for Trails in the CloudTrail User Guide.

    ", "idempotent":true @@ -660,6 +663,7 @@ {"shape":"InvalidHomeRegionException"}, {"shape":"InvalidEventSelectorsException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, {"shape":"NotOrganizationMasterAccountException"}, @@ -690,7 +694,8 @@ {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, {"shape":"NotOrganizationMasterAccountException"}, - {"shape":"NoManagementAccountSLRExistsException"} + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail. You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. The valid Insights event types in this release are ApiErrorRateInsight and ApiCallRateInsight.

    To log CloudTrail Insights events on API call volume, the trail must log write management events. To log CloudTrail Insights events on API error rate, the trail must log read or write management events. You can call GetEventSelectors on a trail to check whether the trail logs management events.

    ", "idempotent":true @@ -851,6 +856,7 @@ "errors":[ {"shape":"CloudTrailARNInvalidException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"TrailNotFoundException"}, {"shape":"InvalidTrailNameException"}, {"shape":"InvalidHomeRegionException"}, @@ -941,6 +947,7 @@ {"shape":"InvalidTrailNameException"}, {"shape":"CloudTrailARNInvalidException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidHomeRegionException"}, {"shape":"UnsupportedOperationException"}, {"shape":"OperationNotPermittedException"}, @@ -1029,6 +1036,7 @@ {"shape":"InvalidEventSelectorsException"}, {"shape":"CloudTrailARNInvalidException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidParameterCombinationException"}, {"shape":"InvalidHomeRegionException"}, {"shape":"KmsKeyNotFoundException"}, @@ -4015,6 +4023,13 @@ "max":200 }, "TerminationProtectionEnabled":{"type":"boolean"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + }, + "documentation":"

    This exception is thrown when the request rate exceeds the limit.

    ", + "exception":true + }, "Trail":{ "type":"structure", "members":{ diff --git a/services/cloudtraildata/pom.xml b/services/cloudtraildata/pom.xml index 43c6e00ade4..f0d417aabd5 100644 --- a/services/cloudtraildata/pom.xml +++ b/services/cloudtraildata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudtraildata AWS Java SDK :: Services :: Cloud Trail Data diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index c482b63caf9..1d9bc4bb330 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatch/src/main/resources/codegen-resources/customization.config b/services/cloudwatch/src/main/resources/codegen-resources/customization.config index 5c580ec8563..d819241c371 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/customization.config +++ b/services/cloudwatch/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ "listDashboards", "listMetrics" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deleteDashboards", "putDashboard", "getDashboard" diff --git a/services/cloudwatch/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cloudwatch/src/main/resources/codegen-resources/endpoint-rule-set.json index 94f8f398869..39504ccc876 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cloudwatch/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not 
supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://monitoring-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - 
"type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://monitoring-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,155 +225,115 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://monitoring.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://monitoring-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://monitoring.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": 
"https://monitoring-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://monitoring.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://monitoring.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://monitoring.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://monitoring.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing 
Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json index c3f0b4d5bd0..a0547661723 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json @@ -1342,7 +1342,7 @@ }, "AlarmTypes":{ "shape":"AlarmTypes", - "documentation":"

    Use this parameter to specify whether you want the operation to return metric alarms or composite alarms. If you omit this parameter, only metric alarms are returned.

    " + "documentation":"

    Use this parameter to specify whether you want the operation to return metric alarms or composite alarms. If you omit this parameter, only metric alarms are returned, even if composite alarms exist in the account.

    For example, if you omit this parameter or specify MetricAlarms, the operation returns only a list of metric alarms. It does not return any composite alarms, even if composite alarms exist in the account.

    If you specify CompositeAlarms, the operation returns only a list of composite alarms, and does not return any metric alarms.

    " }, "ChildrenOfAlarmName":{ "shape":"AlarmName", @@ -1662,7 +1662,7 @@ }, "OrderBy":{ "shape":"InsightRuleOrderBy", - "documentation":"

    Determines what statistic to use to rank the contributors. Valid values are SUM and MAXIMUM.

    " + "documentation":"

    Determines what statistic to use to rank the contributors. Valid values are Sum and Maximum.

    " } } }, @@ -2376,7 +2376,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The ARN of the CloudWatch resource that you want to view tags for.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " + "documentation":"

    The ARN of the CloudWatch resource that you want to view tags for.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " } } }, @@ -3247,7 +3247,7 @@ }, "ExtendedStatistic":{ "shape":"ExtendedStatistic", - "documentation":"

    The percentile statistic for the metric specified in MetricName. Specify a value between p0.0 and p100. When you call PutMetricAlarm and specify a MetricName, you must specify either Statistic or ExtendedStatistic, but not both.

    " + "documentation":"

    The extended statistic for the metric specified in MetricName. When you call PutMetricAlarm and specify a MetricName, you must specify either Statistic or ExtendedStatistic but not both.

    If you specify ExtendedStatistic, the following are valid values:

    • p90

    • tm90

    • tc90

    • ts90

    • wm90

    • IQM

    • PR(n:m) where n and m are values of the metric

    • TC(X%:X%) where X is between 10 and 90 inclusive.

    • TM(X%:X%) where X is between 10 and 90 inclusive.

    • TS(X%:X%) where X is between 10 and 90 inclusive.

    • WM(X%:X%) where X is between 10 and 90 inclusive.

    For more information about these extended statistics, see CloudWatch statistics definitions.

    " }, "Dimensions":{ "shape":"Dimensions", @@ -3291,7 +3291,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

    A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

    If you are using this operation to update an existing alarm, any tags you specify in this parameter are ignored. To change the tags of an existing alarm, use TagResource or UntagResource.

    " + "documentation":"

    A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm. To be able to associate tags with the alarm when you create the alarm, you must have the cloudwatch:TagResource permission.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

    If you are using this operation to update an existing alarm, any tags you specify in this parameter are ignored. To change the tags of an existing alarm, use TagResource or UntagResource.

    " }, "ThresholdMetricId":{ "shape":"MetricId", @@ -3676,7 +3676,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The ARN of the CloudWatch resource that you're adding tags to.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " + "documentation":"

    The ARN of the CloudWatch resource that you're adding tags to.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " }, "Tags":{ "shape":"TagList", @@ -3720,7 +3720,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The ARN of the CloudWatch resource that you're removing tags from.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " + "documentation":"

    The ARN of the CloudWatch resource that you're removing tags from.

    The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

    The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule/insight-rule-name

    For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

    " }, "TagKeys":{ "shape":"TagKeyList", diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index f17f1a9e29a..e6a7e553402 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index ffc4d44d20a..0ddf6cad8a5 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config b/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config index dc10a538fc4..f61c7f5add4 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "deleteResourcePolicy", "putResourcePolicy" ], diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index 23cf96e5b2b..9ab36627eb4 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 23c6afa5bc1..570db166c45 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codecatalyst/pom.xml b/services/codecatalyst/pom.xml index 
e6c1b53be73..d0cd940fd9b 100644 --- a/services/codecatalyst/pom.xml +++ b/services/codecatalyst/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codecatalyst AWS Java SDK :: Services :: Code Catalyst diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index de0becc06b9..87f6268b61f 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codecommit/src/main/resources/codegen-resources/customization.config b/services/codecommit/src/main/resources/codegen-resources/customization.config index ca10e18215b..18a06a2c508 100644 --- a/services/codecommit/src/main/resources/codegen-resources/customization.config +++ b/services/codecommit/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "listRepositories" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "getBranch" ] } diff --git a/services/codecommit/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/codecommit/src/main/resources/codegen-resources/endpoint-rule-set.json index 09eaf8c3674..96a72fcdcaa 100644 --- a/services/codecommit/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/codecommit/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { 
+ "ref": "UseDualStack" + }, + true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "parseURL", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -75,13 +122,17 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { @@ -89,143 +140,86 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": 
"getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://codecommit-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://codecommit-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -238,78 +232,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], 
"type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://codecommit.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://codecommit.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://codecommit.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://codecommit.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/codecommit/src/main/resources/codegen-resources/endpoint-tests.json b/services/codecommit/src/main/resources/codegen-resources/endpoint-tests.json index 3c91d4fa56e..af0933d991f 100644 --- a/services/codecommit/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/codecommit/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,94 +1,68 @@ { "testCases": [ { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-south-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": true - } - }, - { - "documentation": 
"For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-south-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-south-2.api.aws" + "url": "https://codecommit.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-south-2.amazonaws.com" + "url": "https://codecommit.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-south-2", - "UseFIPS": false + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-south-1.api.aws" + "url": "https://codecommit.ap-northeast-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-south-1.amazonaws.com" + "url": "https://codecommit.ap-northeast-2.amazonaws.com" } }, 
"params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-south-1.api.aws" + "url": "https://codecommit.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -99,204 +73,35 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://codecommit-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://codecommit.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ca-central-1.amazonaws.com" + "url": "https://codecommit.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ca-central-1.api.aws" + "url": "https://codecommit.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -307,1166 +112,521 @@ } }, "params": { - "UseDualStack": false, "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://codecommit-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack 
enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://codecommit.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": 
true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region 
eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS 
enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with 
FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.sa-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://codecommit-fips.sa-east-1.amazonaws.com" + "url": "https://codecommit-fips.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ca-central-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.sa-east-1.api.aws" + "url": 
"https://codecommit.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.sa-east-1.amazonaws.com" + "url": "https://codecommit.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-east-1.api.aws" + "url": "https://codecommit.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-east-1.amazonaws.com" + "url": "https://codecommit.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-east-1.api.aws" + "url": "https://codecommit.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + 
"Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-east-1.amazonaws.com" + "url": "https://codecommit.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://codecommit.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.cn-north-1.amazonaws.com.cn" + "url": "https://codecommit.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://codecommit.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - 
"documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.cn-north-1.amazonaws.com.cn" + "url": "https://codecommit-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-gov-west-1.api.aws" + "url": "https://codecommit.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-gov-west-1.amazonaws.com" + "url": "https://codecommit-fips.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "us-east-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.us-gov-west-1.api.aws" + "url": "https://codecommit.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + 
"documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.us-gov-west-1.amazonaws.com" + "url": "https://codecommit-fips.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-1.api.aws" + "url": "https://codecommit.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-1.amazonaws.com" + "url": "https://codecommit-fips.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-1.api.aws" + "url": "https://codecommit-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", 
"expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-1.amazonaws.com" + "url": "https://codecommit.us-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-2.api.aws" + "url": "https://codecommit.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-2.amazonaws.com" + "url": "https://codecommit.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-2.api.aws" + "url": "https://codecommit-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://codecommit.ap-southeast-2.amazonaws.com" + "url": "https://codecommit-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-3.api.aws" + "url": "https://codecommit.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.ap-southeast-3.amazonaws.com" + "url": "https://codecommit.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.ap-southeast-3.api.aws" + "url": "https://codecommit-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://codecommit.ap-southeast-3.amazonaws.com" + "url": "https://codecommit.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-east-1.api.aws" + "url": "https://codecommit-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-east-1.amazonaws.com" + "url": "https://codecommit-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://codecommit.us-east-1.api.aws" + "url": "https://codecommit.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codecommit.us-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but 
this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.us-east-2.api.aws" + "url": "https://codecommit-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codecommit-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.us-east-2.api.aws" + "url": "https://codecommit.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codecommit.us-east-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or 
both" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://codecommit-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://codecommit-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://codecommit.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://codecommit.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://codecommit.cn-northwest-1.amazonaws.com.cn" + 
"url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1476,9 +636,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1488,11 +648,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/codecommit/src/main/resources/codegen-resources/paginators-1.json b/services/codecommit/src/main/resources/codegen-resources/paginators-1.json index ab4bae4cd48..f363a39f6e3 100644 --- a/services/codecommit/src/main/resources/codegen-resources/paginators-1.json +++ b/services/codecommit/src/main/resources/codegen-resources/paginators-1.json @@ -50,6 +50,11 @@ "output_token": "nextToken", "result_key": "branches" }, + "ListFileCommitHistory": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken" + }, "ListPullRequests": { "input_token": "nextToken", "limit_key": "maxResults", diff --git a/services/codecommit/src/main/resources/codegen-resources/service-2.json 
b/services/codecommit/src/main/resources/codegen-resources/service-2.json index 95dd5110159..74ea48996c6 100644 --- a/services/codecommit/src/main/resources/codegen-resources/service-2.json +++ b/services/codecommit/src/main/resources/codegen-resources/service-2.json @@ -172,7 +172,7 @@ {"shape":"InvalidApprovalRuleTemplateDescriptionException"}, {"shape":"NumberOfRuleTemplatesExceededException"} ], - "documentation":"

    Creates a template for approval rules that can then be associated with one or more repositories in your AWS account. When you associate a template with a repository, AWS CodeCommit creates an approval rule that matches the conditions of the template for all pull requests that meet the conditions of the template. For more information, see AssociateApprovalRuleTemplateWithRepository.

    " + "documentation":"

    Creates a template for approval rules that can then be associated with one or more repositories in your Amazon Web Services account. When you associate a template with a repository, CodeCommit creates an approval rule that matches the conditions of the template for all pull requests that meet the conditions of the template. For more information, see AssociateApprovalRuleTemplateWithRepository.

    " }, "CreateBranch":{ "name":"CreateBranch", @@ -1084,7 +1084,7 @@ {"shape":"InvalidMaxResultsException"}, {"shape":"InvalidContinuationTokenException"} ], - "documentation":"

    Lists all approval rule templates in the specified AWS Region in your AWS account. If an AWS Region is not specified, the AWS Region where you are signed in is used.

    " + "documentation":"

    Lists all approval rule templates in the specified Amazon Web Services Region in your Amazon Web Services account. If an Amazon Web Services Region is not specified, the Amazon Web Services Region where you are signed in is used.

    " }, "ListAssociatedApprovalRuleTemplatesForRepository":{ "name":"ListAssociatedApprovalRuleTemplatesForRepository", @@ -1129,6 +1129,32 @@ ], "documentation":"

    Gets information about one or more branches in a repository.

    " }, + "ListFileCommitHistory":{ + "name":"ListFileCommitHistory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFileCommitHistoryRequest"}, + "output":{"shape":"ListFileCommitHistoryResponse"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidContinuationTokenException"}, + {"shape":"InvalidMaxResultsException"}, + {"shape":"TipsDivergenceExceededException"}, + {"shape":"CommitRequiredException"}, + {"shape":"InvalidCommitException"}, + {"shape":"CommitDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ], + "documentation":"

    Retrieves a list of commits and changes to a specified file.

    " + }, "ListPullRequests":{ "name":"ListPullRequests", "http":{ @@ -1205,7 +1231,7 @@ {"shape":"ResourceArnRequiredException"}, {"shape":"InvalidResourceArnException"} ], - "documentation":"

    Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    " + "documentation":"

    Gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in CodeCommit. For a list of valid resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.

    " }, "MergeBranchesByFastForward":{ "name":"MergeBranchesByFastForward", @@ -1654,7 +1680,7 @@ {"shape":"DirectoryNameConflictsWithFileNameException"}, {"shape":"FilePathConflictsWithSubmodulePathException"} ], - "documentation":"

    Adds or updates a file in a branch in an AWS CodeCommit repository, and generates a commit for the addition in the specified branch.

    " + "documentation":"

    Adds or updates a file in a branch in a CodeCommit repository, and generates a commit for the addition in the specified branch.

    " }, "PutRepositoryTriggers":{ "name":"PutRepositoryTriggers", @@ -1707,7 +1733,7 @@ {"shape":"InvalidSystemTagUsageException"}, {"shape":"TagPolicyException"} ], - "documentation":"

    Adds or updates tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    " + "documentation":"

    Adds or updates tags for a resource in CodeCommit. For a list of valid resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.

    " }, "TestRepositoryTriggers":{ "name":"TestRepositoryTriggers", @@ -1760,7 +1786,7 @@ {"shape":"InvalidSystemTagUsageException"}, {"shape":"TagPolicyException"} ], - "documentation":"

    Removes tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    " + "documentation":"

    Removes tags for a resource in CodeCommit. For a list of valid resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.

    " }, "UpdateApprovalRuleTemplateContent":{ "name":"UpdateApprovalRuleTemplateContent", @@ -1999,7 +2025,7 @@ {"shape":"RepositoryNameRequiredException"}, {"shape":"InvalidRepositoryNameException"} ], - "documentation":"

    Renames a repository. The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix .git is prohibited. For more information about the limits on repository names, see Limits in the AWS CodeCommit User Guide.

    " + "documentation":"

    Renames a repository. The repository name must be unique across the calling Amazon Web Services account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix .git is prohibited. For more information about the limits on repository names, see Quotas in the CodeCommit User Guide.

    " } }, "shapes":{ @@ -2008,7 +2034,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The specified Amazon Resource Name (ARN) does not exist in the AWS account.

    ", + "documentation":"

    The specified Amazon Resource Name (ARN) does not exist in the Amazon Web Services account.

    ", "exception":true }, "AdditionalData":{"type":"string"}, @@ -2198,7 +2224,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The specified approval rule template does not exist. Verify that the name is correct and that you are signed in to the AWS Region where the template was created, and then try again.

    ", + "documentation":"

    The specified approval rule template does not exist. Verify that the name is correct and that you are signed in to the Amazon Web Services Region where the template was created, and then try again.

    ", "exception":true }, "ApprovalRuleTemplateId":{"type":"string"}, @@ -2218,7 +2244,7 @@ "type":"structure", "members":{ }, - "documentation":"

    You cannot create an approval rule template with that name because a template with that name already exists in this AWS Region for your AWS account. Approval rule template names must be unique.

    ", + "documentation":"

    You cannot create an approval rule template with that name because a template with that name already exists in this Amazon Web Services Region for your Amazon Web Services account. Approval rule template names must be unique.

    ", "exception":true }, "ApprovalRuleTemplateNameList":{ @@ -2295,7 +2321,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The specified Amazon Resource Name (ARN) does not exist in the AWS account.

    ", + "documentation":"

    The specified Amazon Resource Name (ARN) does not exist in the Amazon Web Services account.

    ", "exception":true }, "BatchAssociateApprovalRuleTemplateWithRepositoriesError":{ @@ -2783,7 +2809,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The comment is too large. Comments are limited to 1,000 characters.

    ", + "documentation":"

    The comment is too large. Comments are limited to 10,240 characters.

    ", "exception":true }, "CommentDeletedException":{ @@ -3116,7 +3142,7 @@ }, "approvalRuleTemplateContent":{ "shape":"ApprovalRuleTemplateContent", - "documentation":"

    The content of the approval rule that is created on pull requests in associated repositories. If you specify one or more destination references (branches), approval rules are created in an associated repository only if their destination references (branches) match those specified in the template.

    When you create the content of the approval rule template, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The content of the approval rule that is created on pull requests in associated repositories. If you specify one or more destination references (branches), approval rules are created in an associated repository only if their destination references (branches) match those specified in the template.

    When you create the content of the approval rule template, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an Amazon Web Services account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the Amazon Web Services account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " }, "approvalRuleTemplateDescription":{ "shape":"ApprovalRuleTemplateDescription", @@ -3249,7 +3275,7 @@ }, "approvalRuleContent":{ "shape":"ApprovalRuleContent", - "documentation":"

    The content of the approval rule, including the number of approvals needed and the structure of an approval pool defined for approvals, if any. For more information about approval pools, see the AWS CodeCommit User Guide.

    When you create the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following would be counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The content of the approval rule, including the number of approvals needed and the structure of an approval pool defined for approvals, if any. For more information about approval pools, see the CodeCommit User Guide.

    When you create the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an Amazon Web Services account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the Amazon Web Services account 123456789012 and Mary_Major, all of the following would be counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " } } }, @@ -3284,7 +3310,7 @@ }, "clientRequestToken":{ "shape":"ClientRequestToken", - "documentation":"

    A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

    The AWS SDKs prepopulate client request tokens. If you are using an AWS SDK, an idempotency token is created for you.

    ", + "documentation":"

    A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

    The Amazon Web Services SDKs prepopulate client request tokens. If you are using an Amazon Web Services SDK, an idempotency token is created for you.

    ", "idempotencyToken":true } } @@ -3305,7 +3331,7 @@ "members":{ "repositoryName":{ "shape":"RepositoryName", - "documentation":"

    The name of the new repository to be created.

    The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For more information about the limits on repository names, see Limits in the AWS CodeCommit User Guide. The suffix .git is prohibited.

    " + "documentation":"

    The name of the new repository to be created.

    The repository name must be unique across the calling Amazon Web Services account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For more information about the limits on repository names, see Quotas in the CodeCommit User Guide. The suffix .git is prohibited.

    " }, "repositoryDescription":{ "shape":"RepositoryDescription", @@ -4025,9 +4051,28 @@ "type":"structure", "members":{ }, - "documentation":"

    The specified file exceeds the file size limit for AWS CodeCommit. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.

    ", + "documentation":"

    The specified file exceeds the file size limit for CodeCommit. For more information about limits in CodeCommit, see Quotas in the CodeCommit User Guide.

    ", "exception":true }, + "FileVersion":{ + "type":"structure", + "members":{ + "commit":{"shape":"Commit"}, + "blobId":{ + "shape":"ObjectId", + "documentation":"

    The blob ID of the object that represents the content of the file in this version.

    " + }, + "path":{ + "shape":"Path", + "documentation":"

    The name and path of the file at which this blob is indexed which contains the data for this version of the file. This value will vary between file versions if a file is renamed or if its path changes.

    " + }, + "revisionChildren":{ + "shape":"RevisionChildren", + "documentation":"

    An array of commit IDs that contain more recent versions of this file. If there are no additional versions of the file, this array will be empty.

    " + } + }, + "documentation":"

    Information about a version of a file.

    " + }, "FilesMetadata":{ "type":"list", "member":{"shape":"FileMetadata"} @@ -4248,15 +4293,15 @@ }, "repositoryName":{ "shape":"RepositoryName", - "documentation":"

    The name of the repository that contains the pull request.

    " + "documentation":"

    The name of the repository that contains the pull request. Requirement is conditional: repositoryName must be specified when beforeCommitId and afterCommitId are included.

    " }, "beforeCommitId":{ "shape":"CommitId", - "documentation":"

    The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created.

    " + "documentation":"

    The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created. Requirement is conditional: beforeCommitId must be specified when repositoryName is included.

    " }, "afterCommitId":{ "shape":"CommitId", - "documentation":"

    The full commit ID of the commit in the source branch that was the tip of the branch at the time the comment was made.

    " + "documentation":"

    The full commit ID of the commit in the source branch that was the tip of the branch at the time the comment was made. Requirement is conditional: afterCommitId must be specified when repositoryName is included.

    " }, "nextToken":{ "shape":"NextToken", @@ -4373,7 +4418,7 @@ }, "commitSpecifier":{ "shape":"CommitName", - "documentation":"

    The fully quaified reference that identifies the commit that contains the file. For example, you can specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/master. If none is provided, the head commit is used.

    " + "documentation":"

    The fully qualified reference that identifies the commit that contains the file. For example, you can specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/main. If none is provided, the head commit is used.

    " }, "filePath":{ "shape":"Path", @@ -4824,14 +4869,14 @@ "type":"structure", "members":{ }, - "documentation":"

    The description for the approval rule template is not valid because it exceeds the maximum characters allowed for a description. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.

    ", + "documentation":"

    The description for the approval rule template is not valid because it exceeds the maximum characters allowed for a description. For more information about limits in CodeCommit, see Quotas in the CodeCommit User Guide.

    ", "exception":true }, "InvalidApprovalRuleTemplateNameException":{ "type":"structure", "members":{ }, - "documentation":"

    The name of the approval rule template is not valid. Template names must be between 1 and 100 valid characters in length. For more information about limits in AWS CodeCommit, see AWS CodeCommit User Guide.

    ", + "documentation":"

    The name of the approval rule template is not valid. Template names must be between 1 and 100 valid characters in length. For more information about limits in CodeCommit, see Quotas in the CodeCommit User Guide.

    ", "exception":true }, "InvalidApprovalStateException":{ @@ -5062,14 +5107,14 @@ "type":"structure", "members":{ }, - "documentation":"

    The value of the reaction is not valid. For more information, see the AWS CodeCommit User Guide.

    ", + "documentation":"

    The value of the reaction is not valid. For more information, see the CodeCommit User Guide.

    ", "exception":true }, "InvalidReferenceNameException":{ "type":"structure", "members":{ }, - "documentation":"

    The specified reference name format is not valid. Reference names must conform to the Git references format (for example, refs/heads/master). For more information, see Git Internals - Git References or consult your Git documentation.

    ", + "documentation":"

    The specified reference name format is not valid. Reference names must conform to the Git references format (for example, refs/heads/main). For more information, see Git Internals - Git References or consult your Git documentation.

    ", "exception":true }, "InvalidRelativeFileVersionEnumException":{ @@ -5146,14 +5191,14 @@ "type":"structure", "members":{ }, - "documentation":"

    The AWS Region for the trigger target does not match the AWS Region for the repository. Triggers must be created in the same Region as the target for the trigger.

    ", + "documentation":"

    The Amazon Web Services Region for the trigger target does not match the Amazon Web Services Region for the repository. Triggers must be created in the same Amazon Web Services Region as the target for the trigger.

    ", "exception":true }, "InvalidResourceArnException":{ "type":"structure", "members":{ }, - "documentation":"

    The value for the resource ARN is not valid. For more information about resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    ", + "documentation":"

    The value for the resource ARN is not valid. For more information about resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.

    ", "exception":true }, "InvalidRevisionIdException":{ @@ -5284,7 +5329,7 @@ "members":{ "approvalRuleTemplateNames":{ "shape":"ApprovalRuleTemplateNameList", - "documentation":"

    The names of all the approval rule templates found in the AWS Region for your AWS account.

    " + "documentation":"

    The names of all the approval rule templates found in the Amazon Web Services Region for your Amazon Web Services account.

    " }, "nextToken":{ "shape":"NextToken", @@ -5352,6 +5397,49 @@ }, "documentation":"

    Represents the output of a list branches operation.

    " }, + "ListFileCommitHistoryRequest":{ + "type":"structure", + "required":[ + "repositoryName", + "filePath" + ], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

    The name of the repository that contains the file.

    " + }, + "commitSpecifier":{ + "shape":"CommitName", + "documentation":"

    The fully qualified reference that identifies the commit that contains the file. For example, you can specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/main. If none is provided, the head commit is used.

    " + }, + "filePath":{ + "shape":"Path", + "documentation":"

    The full path of the file whose history you want to retrieve, including the name of the file.

    " + }, + "maxResults":{ + "shape":"Limit", + "documentation":"

    A non-zero, non-negative integer used to limit the number of returned results.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that allows the operation to batch the results.

    " + } + } + }, + "ListFileCommitHistoryResponse":{ + "type":"structure", + "required":["revisionDag"], + "members":{ + "revisionDag":{ + "shape":"RevisionDag", + "documentation":"

    An array of FileVersion objects that form a directed acyclic graph (DAG) of the changes to the file made by the commits that changed the file.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    An enumeration token that can be used to return the next batch of results.

    " + } + } + }, "ListPullRequestsInput":{ "type":"structure", "required":["repositoryName"], @@ -5428,7 +5516,7 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

    " + "documentation":"

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to CodeCommit, another page of 1,000 records is retrieved.

    " }, "sortBy":{ "shape":"SortByEnum", @@ -5450,7 +5538,7 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

    " + "documentation":"

    An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to CodeCommit, another page of 1,000 records is retrieved.

    " } }, "documentation":"

    Represents the output of a list repositories operation.

    " @@ -6027,7 +6115,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The maximum number of approval rule templates has been exceeded for this AWS Region.

    ", + "documentation":"

    The maximum number of approval rule templates has been exceeded for this Amazon Web Services Region.

    ", "exception":true }, "NumberOfRulesExceededException":{ @@ -6644,7 +6732,7 @@ }, "reactionValue":{ "shape":"ReactionValue", - "documentation":"

    The emoji reaction you want to add or update. To remove a reaction, provide a value of blank or null. You can also provide the value of none. For information about emoji reaction values supported in AWS CodeCommit, see the AWS CodeCommit User Guide.

    " + "documentation":"

    The emoji reaction you want to add or update. To remove a reaction, provide a value of blank or null. You can also provide the value of none. For information about emoji reaction values supported in CodeCommit, see the CodeCommit User Guide.

    " } } }, @@ -6833,7 +6921,7 @@ "documentation":"

    The Unicode codepoint for the reaction.

    " } }, - "documentation":"

    Information about the values for reactions to a comment. AWS CodeCommit supports a limited set of reactions.

    " + "documentation":"

    Information about the values for reactions to a comment. CodeCommit supports a limited set of reactions.

    " }, "ReactionValueRequiredException":{ "type":"structure", @@ -6952,7 +7040,7 @@ "members":{ "accountId":{ "shape":"AccountId", - "documentation":"

    The ID of the AWS account associated with the repository.

    " + "documentation":"

    The ID of the Amazon Web Services account associated with the repository.

    " }, "repositoryId":{ "shape":"RepositoryId", @@ -7086,7 +7174,7 @@ "documentation":"

    The repository events that cause the trigger to run actions in another service, such as sending a notification through Amazon SNS.

    The valid value \"all\" cannot be used with any other values.

    " } }, - "documentation":"

    Information about a trigger for a repository.

    " + "documentation":"

    Information about a trigger for a repository.

    If you want to receive notifications about repository events, consider using notifications instead of triggers. For more information, see Configuring notifications for repository events.

    " }, "RepositoryTriggerBranchNameListRequiredException":{ "type":"structure", @@ -7171,7 +7259,7 @@ "type":"structure", "members":{ }, - "documentation":"

    A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

    ", + "documentation":"

    A valid Amazon Resource Name (ARN) for a CodeCommit resource is required. For a list of valid resources in CodeCommit, see CodeCommit Resources and Operations in the CodeCommit User Guide.

    ", "exception":true }, "RestrictedSourceFileException":{ @@ -7181,6 +7269,14 @@ "documentation":"

    The commit cannot be created because one of the changes specifies copying or moving a .gitkeep file.

    ", "exception":true }, + "RevisionChildren":{ + "type":"list", + "member":{"shape":"RevisionId"} + }, + "RevisionDag":{ + "type":"list", + "member":{"shape":"FileVersion"} + }, "RevisionId":{"type":"string"}, "RevisionIdRequiredException":{ "type":"structure", @@ -7475,7 +7571,7 @@ "type":"structure", "members":{ }, - "documentation":"

    The maximum number of tags for an AWS CodeCommit resource has been exceeded.

    ", + "documentation":"

    The maximum number of tags for a CodeCommit resource has been exceeded.

    ", "exception":true }, "UntagResourceInput":{ @@ -7612,11 +7708,11 @@ "members":{ "repositoryName":{ "shape":"RepositoryName", - "documentation":"

    The name of the repository to set or change the default branch for.

    " + "documentation":"

    The name of the repository for which you want to set or change the default branch.

    " }, "defaultBranchName":{ "shape":"BranchName", - "documentation":"

    The name of the branch to set as the default.

    " + "documentation":"

    The name of the branch to set as the default branch.

    " } }, "documentation":"

    Represents the input of an update default branch operation.

    " @@ -7643,7 +7739,7 @@ }, "newRuleContent":{ "shape":"ApprovalRuleContent", - "documentation":"

    The updated content for the approval rule.

    When you update the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an AWS account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the AWS account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The updated content for the approval rule.

    When you update the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

    • CodeCommitApprovers: This option only requires an Amazon Web Services account and a resource. It can be used for both IAM users and federated access users whose name matches the provided resource name. This is a very powerful option that offers a great deal of flexibility. For example, if you specify the Amazon Web Services account 123456789012 and Mary_Major, all of the following are counted as approvals coming from that user:

      • An IAM user in the account (arn:aws:iam::123456789012:user/Mary_Major)

      • A federated user identified in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major)

      This option does not recognize an active session of someone assuming the role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you include a wildcard (*Mary_Major).

    • Fully qualified ARN: This option allows you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or role.

    For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

    " } } }, @@ -7813,5 +7909,5 @@ }, "blob":{"type":"blob"} }, - "documentation":"AWS CodeCommit

    This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

    You can use the AWS CodeCommit API to work with the following objects:

    Repositories, by calling the following:

    • BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.

    • CreateRepository, which creates an AWS CodeCommit repository.

    • DeleteRepository, which deletes an AWS CodeCommit repository.

    • GetRepository, which returns information about a specified repository.

    • ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.

    • UpdateRepositoryDescription, which sets or updates the description of the repository.

    • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository can access it until you send them the new HTTPS or SSH URL to use.

    Branches, by calling the following:

    • CreateBranch, which creates a branch in a specified repository.

    • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

    • GetBranch, which returns information about a specified branch.

    • ListBranches, which lists all branches for a specified repository.

    • UpdateDefaultBranch, which changes the default branch for a repository.

    Files, by calling the following:

    • DeleteFile, which deletes the content of a specified file from a specified branch.

    • GetBlob, which returns the base-64 encoded content of an individual Git blob object in a repository.

    • GetFile, which returns the base-64 encoded content of a specified file.

    • GetFolder, which returns the contents of a specified folder or directory.

    • PutFile, which adds or modifies a single file in a specified repository and branch.

    Commits, by calling the following:

    • BatchGetCommits, which returns information about one or more commits in a repository.

    • CreateCommit, which creates a commit for changes to a repository.

    • GetCommit, which returns information about a commit, including commit messages and author and committer information.

    • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference).

    Merges, by calling the following:

    • BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository.

    • CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts.

    • DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge.

    • GetMergeCommit, which returns information about the merge between a source and destination commit.

    • GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.

    • GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers.

    • MergeBranchesByFastForward, which merges two branches using the fast-forward merge option.

    • MergeBranchesBySquash, which merges two branches using the squash merge option.

    • MergeBranchesByThreeWay, which merges two branches using the three-way merge option.

    Pull requests, by calling the following:

    Approval rule templates, by calling the following:

    Comments in a repository, by calling the following:

    Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

    • ListTagsForResource, which gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit.

    • TagResource, which adds or updates tags for a resource in AWS CodeCommit.

    • UntagResource, which removes tags for a resource in AWS CodeCommit.

    Triggers, by calling the following:

    • GetRepositoryTriggers, which returns information about triggers configured for a repository.

    • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

    • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

    For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

    " + "documentation":"CodeCommit

    This is the CodeCommit API Reference. This reference provides descriptions of the operations and data types for CodeCommit API along with usage examples.

    You can use the CodeCommit API to work with the following objects:

    Repositories, by calling the following:

    • BatchGetRepositories, which returns information about one or more repositories associated with your Amazon Web Services account.

    • CreateRepository, which creates a CodeCommit repository.

    • DeleteRepository, which deletes a CodeCommit repository.

    • GetRepository, which returns information about a specified repository.

    • ListRepositories, which lists all CodeCommit repositories associated with your Amazon Web Services account.

    • UpdateRepositoryDescription, which sets or updates the description of the repository.

    • UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository can access it until you send them the new HTTPS or SSH URL to use.

    Branches, by calling the following:

    • CreateBranch, which creates a branch in a specified repository.

    • DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.

    • GetBranch, which returns information about a specified branch.

    • ListBranches, which lists all branches for a specified repository.

    • UpdateDefaultBranch, which changes the default branch for a repository.

    Files, by calling the following:

    • DeleteFile, which deletes the content of a specified file from a specified branch.

    • GetBlob, which returns the base-64 encoded content of an individual Git blob object in a repository.

    • GetFile, which returns the base-64 encoded content of a specified file.

    • GetFolder, which returns the contents of a specified folder or directory.

    • ListFileCommitHistory, which retrieves a list of commits and changes to a specified file.

    • PutFile, which adds or modifies a single file in a specified repository and branch.

    Commits, by calling the following:

    • BatchGetCommits, which returns information about one or more commits in a repository.

    • CreateCommit, which creates a commit for changes to a repository.

    • GetCommit, which returns information about a commit, including commit messages and author and committer information.

    • GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference).

    Merges, by calling the following:

    • BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository.

    • CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts.

    • DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge.

    • GetMergeCommit, which returns information about the merge between a source and destination commit.

    • GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.

    • GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers.

    • MergeBranchesByFastForward, which merges two branches using the fast-forward merge option.

    • MergeBranchesBySquash, which merges two branches using the squash merge option.

    • MergeBranchesByThreeWay, which merges two branches using the three-way merge option.

    Pull requests, by calling the following:

    Approval rule templates, by calling the following:

    Comments in a repository, by calling the following:

    Tags used to tag resources in CodeCommit (not Git tags), by calling the following:

    • ListTagsForResource, which gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in CodeCommit.

    • TagResource, which adds or updates tags for a resource in CodeCommit.

    • UntagResource, which removes tags for a resource in CodeCommit.

    Triggers, by calling the following:

    • GetRepositoryTriggers, which returns information about triggers configured for a repository.

    • PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.

    • TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.

    For information about how to use CodeCommit, see the CodeCommit User Guide.

    " } diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index f04590778f6..498e294ab41 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codedeploy/src/main/resources/codegen-resources/customization.config b/services/codedeploy/src/main/resources/codegen-resources/customization.config index 9ef3b906a84..3fa976b82ee 100644 --- a/services/codedeploy/src/main/resources/codegen-resources/customization.config +++ b/services/codedeploy/src/main/resources/codegen-resources/customization.config @@ -6,7 +6,7 @@ "listGitHubAccountTokenNames", "listOnPremisesInstances" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "continueDeployment", "skipWaitTimeForInstanceTermination", "updateApplication", diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index 96148f9a33a..3e0e9025f54 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index 9ee676d8e62..66b10253d6d 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurusecurity/pom.xml b/services/codegurusecurity/pom.xml index 11e32ba49d7..f85c8b416a3 100644 --- a/services/codegurusecurity/pom.xml +++ b/services/codegurusecurity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codegurusecurity AWS Java SDK :: Services :: Code Guru Security diff --git 
a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 7cd9396534c..650eddd6194 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codepipeline/src/main/resources/codegen-resources/customization.config b/services/codepipeline/src/main/resources/codegen-resources/customization.config index aeb5a89163e..2239d96d258 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/customization.config +++ b/services/codepipeline/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "listPipelines", "listWebhooks" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deregisterWebhookWithThirdParty", "registerWebhookWithThirdParty" ] diff --git a/services/codestar/pom.xml b/services/codestar/pom.xml index 59c442de302..c8df6275e97 100644 --- a/services/codestar/pom.xml +++ b/services/codestar/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codestar AWS Java SDK :: Services :: AWS CodeStar diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index 651cc62b507..6aa4ff6e316 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index facbf562765..ca49e67b5e6 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/cognitoidentity/pom.xml 
b/services/cognitoidentity/pom.xml index da7f4390ea1..552a155d7da 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index 79d41a01b63..e681f053f43 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config b/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config index e8cd40c8e33..0a61574c89d 100644 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "associateSoftwareToken" ], "shapeModifiers" : { diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/cognitoidentityprovider/src/main/resources/codegen-resources/endpoint-rule-set.json index e6566d99ff9..0f514686ef3 100644 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid 
Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://cognito-idp-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://cognito-idp-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://cognito-idp-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://cognito-idp-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + 
"ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://cognito-idp.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://cognito-idp.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://cognito-idp.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://cognito-idp.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json index e2356443cd4..2a7f59f9240 100644 --- 
a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json @@ -534,7 +534,7 @@ {"shape":"InvalidEmailRoleAccessPolicyException"}, {"shape":"InvalidSmsRoleTrustRelationshipException"} ], - "documentation":"

    This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

    If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

    Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user.

    For custom attributes, you must prepend the custom: prefix to the attribute name.

    In addition to updating user attributes, this API can also be used to mark phone and email as verified.

    Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

    Learn more

    " + "documentation":"

    This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

    If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

    Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value.

    For custom attributes, you must prepend the custom: prefix to the attribute name.

    In addition to updating user attributes, this API can also be used to mark phone and email as verified.

    Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

    Learn more

    " }, "AdminUserGlobalSignOut":{ "name":"AdminUserGlobalSignOut", @@ -571,7 +571,8 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

    Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

    After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

    Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

    After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "ChangePassword":{ "name":"ChangePassword", @@ -620,7 +621,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Confirms tracking of the device. This API call is the call that begins device tracking.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Confirms tracking of the device. This API call is the call that begins device tracking.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "ConfirmForgotPassword":{ "name":"ConfirmForgotPassword", @@ -1094,7 +1096,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Forgets the specified device.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Forgets the specified device.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "ForgotPassword":{ "name":"ForgotPassword", @@ -1161,7 +1164,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Gets the device.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Gets the device.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "GetGroup":{ "name":"GetGroup", @@ -1333,7 +1337,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Signs out a user from all devices. GlobalSignOut invalidates all identity, access and refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.

    Your app isn't aware that a user's access token is revoked unless it attempts to authorize a user pools API request with an access token that contains the scope aws.cognito.signin.user.admin. Your app might otherwise accept access tokens until they expire.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Signs out a user from all devices. GlobalSignOut invalidates all identity, access and refresh tokens that Amazon Cognito has issued to a user. A user can still use a hosted UI cookie to retrieve new tokens for the duration of the 1-hour cookie validity period.

    Your app isn't aware that a user's access token is revoked unless it attempts to authorize a user pools API request with an access token that contains the scope aws.cognito.signin.user.admin. Your app might otherwise accept access tokens until they expire.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "InitiateAuth":{ "name":"InitiateAuth", @@ -1383,7 +1388,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Lists the sign-in devices that Amazon Cognito has registered to the current user.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Lists the sign-in devices that Amazon Cognito has registered to the current user.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "ListGroups":{ "name":"ListGroups", @@ -1616,7 +1622,8 @@ {"shape":"UnsupportedTokenTypeException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "SetLogDeliveryConfiguration":{ "name":"SetLogDeliveryConfiguration", @@ -1690,7 +1697,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "SetUserPoolMfaConfig":{ "name":"SetUserPoolMfaConfig", @@ -1847,7 +1855,8 @@ {"shape":"UserPoolAddOnNotEnabledException"}, {"shape":"InternalErrorException"} ], - "documentation":"

    Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "UpdateDeviceStatus":{ "name":"UpdateDeviceStatus", @@ -1869,7 +1878,8 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Updates the device status.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Updates the device status.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "UpdateGroup":{ "name":"UpdateGroup", @@ -2040,7 +2050,8 @@ {"shape":"CodeMismatchException"}, {"shape":"ForbiddenException"} ], - "documentation":"

    Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    " + "documentation":"

    Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

    ", + "authtype":"none" }, "VerifyUserAttribute":{ "name":"VerifyUserAttribute", @@ -3394,7 +3405,8 @@ "ChallengeResponsesType":{ "type":"map", "key":{"shape":"StringType"}, - "value":{"shape":"StringType"} + "value":{"shape":"StringType"}, + "sensitive":true }, "ChangePasswordRequest":{ "type":"structure", @@ -3464,7 +3476,7 @@ "members":{ "LogGroupArn":{ "shape":"ArnType", - "documentation":"

    The Amazon Resource Name (arn) of a CloudWatch Logs log group where your user pool sends logs. The log group must not be encrypted with Key Management Service and must be in the same Amazon Web Services account as your user pool.

    " + "documentation":"

    The Amazon Resource Name (arn) of a CloudWatch Logs log group where your user pool sends logs. The log group must not be encrypted with Key Management Service and must be in the same Amazon Web Services account as your user pool.

    To send logs to log groups with a resource policy of a size greater than 5120 characters, configure a log group with a path that starts with /aws/vendedlogs. For more information, see Enabling logging from certain Amazon Web Services services.

    " } }, "documentation":"

    The CloudWatch logging destination of a user pool detailed activity logging configuration.

    " @@ -3788,7 +3800,7 @@ "documentation":"

    The user pool ID.

    " }, "ProviderName":{ - "shape":"ProviderNameTypeV1", + "shape":"ProviderNameTypeV2", "documentation":"

    The IdP name.

    " }, "ProviderType":{ @@ -6189,6 +6201,7 @@ }, "PaginationKey":{ "type":"string", + "max":131072, "min":1, "pattern":"[\\S]+" }, @@ -6317,13 +6330,13 @@ "type":"string", "max":32, "min":1, - "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+" + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}\\p{Z}]+" }, - "ProviderNameTypeV1":{ + "ProviderNameTypeV2":{ "type":"string", "max":32, - "min":3, - "pattern":"[^_][\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}][^_]+" + "min":1, + "pattern":"[^_\\p{Z}][\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}][^_\\p{Z}]+" }, "ProviderUserIdentifierType":{ "type":"structure", @@ -6793,7 +6806,8 @@ "SessionType":{ "type":"string", "max":2048, - "min":20 + "min":20, + "sensitive":true }, "SetLogDeliveryConfigurationRequest":{ "type":"structure", @@ -7100,7 +7114,8 @@ "type":"string", "max":6, "min":6, - "pattern":"[0-9]+" + "pattern":"[0-9]+", + "sensitive":true }, "SoftwareTokenMfaConfigType":{ "type":"structure", @@ -7875,7 +7890,8 @@ "documentation":"

    Encoded device-fingerprint details that your app collected with the Amazon Cognito context data collection library. For more information, see Adding user device and session data to API requests.

    " } }, - "documentation":"

    Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.

    " + "documentation":"

    Contextual data, such as the user's device fingerprint, IP address, or location, used for evaluating the risk of an unexpected event by Amazon Cognito advanced security.

    ", + "sensitive":true }, "UserFilterType":{ "type":"string", diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index fab179973a3..657aefa832b 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index f873a7dc5c5..1ae2c010f11 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index 8881a0c3794..5ef93ced619 100644 --- a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index 680c7793002..09268ade038 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/computeoptimizer/src/main/resources/codegen-resources/endpoint-rule-set.json index 41a58051cf6..dfdfa862172 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/computeoptimizer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - 
"argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - 
"endpoint": { - "url": "https://compute-optimizer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://compute-optimizer-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://compute-optimizer-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://compute-optimizer-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + 
{ + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://compute-optimizer.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://compute-optimizer.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://compute-optimizer.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://compute-optimizer.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json index 
53a841b3d83..7974def4d42 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json +++ b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json @@ -153,6 +153,26 @@ ], "documentation":"

    Exports optimization recommendations for Lambda functions.

    Recommendations are exported in a comma-separated values (.csv) file, and its metadata in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one Lambda function export job in progress per Amazon Web Services Region.

    " }, + "ExportLicenseRecommendations":{ + "name":"ExportLicenseRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExportLicenseRecommendationsRequest"}, + "output":{"shape":"ExportLicenseRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Export optimization recommendations for your licenses.

    Recommendations are exported in a comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one license export job in progress per Amazon Web Services Region.

    " + }, "GetAutoScalingGroupRecommendations":{ "name":"GetAutoScalingGroupRecommendations", "http":{ @@ -349,6 +369,26 @@ ], "documentation":"

    Returns Lambda function recommendations.

    Compute Optimizer generates recommendations for functions that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.

    " }, + "GetLicenseRecommendations":{ + "name":"GetLicenseRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLicenseRecommendationsRequest"}, + "output":{"shape":"GetLicenseRecommendationsResponse"}, + "errors":[ + {"shape":"OptInRequiredException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingAuthenticationToken"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns license recommendations for Amazon EC2 instances that run on a specific license.

    Compute Optimizer generates recommendations for licenses that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.

    " + }, "GetRecommendationPreferences":{ "name":"GetRecommendationPreferences", "http":{ @@ -1346,6 +1386,43 @@ "s3Destination":{"shape":"S3Destination"} } }, + "ExportLicenseRecommendationsRequest":{ + "type":"structure", + "required":["s3DestinationConfig"], + "members":{ + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The IDs of the Amazon Web Services accounts for which to export license recommendations.

    If your account is the management account of an organization, use this parameter to specify the member account for which you want to export recommendations.

    This parameter can't be specified together with the include member accounts parameter. The parameters are mutually exclusive.

    If this parameter is omitted, recommendations for member accounts aren't included in the export.

    You can specify multiple account IDs per request.

    " + }, + "filters":{ + "shape":"LicenseRecommendationFilters", + "documentation":"

    An array of objects to specify a filter that exports a more specific set of license recommendations.

    " + }, + "fieldsToExport":{ + "shape":"ExportableLicenseFields", + "documentation":"

    The recommendations data to include in the export file. For more information about the fields that can be exported, see Exported files in the Compute Optimizer User Guide.

    " + }, + "s3DestinationConfig":{"shape":"S3DestinationConfig"}, + "fileFormat":{ + "shape":"FileFormat", + "documentation":"

    The format of the export file.

    A CSV file is the only export format currently supported.

    " + }, + "includeMemberAccounts":{ + "shape":"IncludeMemberAccounts", + "documentation":"

    Indicates whether to include recommendations for resources in all member accounts of the organization if your account is the management account of an organization.

    The member accounts must also be opted in to Compute Optimizer, and trusted access for Compute Optimizer must be enabled in the organization account. For more information, see Compute Optimizer and Amazon Web Services Organizations trusted access in the Compute Optimizer User Guide.

    If this parameter is omitted, recommendations for member accounts of the organization aren't included in the export file.

    This parameter cannot be specified together with the account IDs parameter. The parameters are mutually exclusive.

    " + } + } + }, + "ExportLicenseRecommendationsResponse":{ + "type":"structure", + "members":{ + "jobId":{ + "shape":"JobId", + "documentation":"

    The identification number of the export job.

    To view the status of an export job, use the DescribeRecommendationExportJobs action and specify the job ID.

    " + }, + "s3Destination":{"shape":"S3Destination"} + } + }, "ExportableAutoScalingGroupField":{ "type":"string", "enum":[ @@ -1544,6 +1621,36 @@ "type":"list", "member":{"shape":"ExportableLambdaFunctionField"} }, + "ExportableLicenseField":{ + "type":"string", + "enum":[ + "AccountId", + "ResourceArn", + "LookbackPeriodInDays", + "LastRefreshTimestamp", + "Finding", + "FindingReasonCodes", + "CurrentLicenseConfigurationNumberOfCores", + "CurrentLicenseConfigurationInstanceType", + "CurrentLicenseConfigurationOperatingSystem", + "CurrentLicenseConfigurationLicenseName", + "CurrentLicenseConfigurationLicenseEdition", + "CurrentLicenseConfigurationLicenseModel", + "CurrentLicenseConfigurationLicenseVersion", + "CurrentLicenseConfigurationMetricsSource", + "RecommendationOptionsOperatingSystem", + "RecommendationOptionsLicenseEdition", + "RecommendationOptionsLicenseModel", + "RecommendationOptionsSavingsOpportunityPercentage", + "RecommendationOptionsEstimatedMonthlySavingsCurrency", + "RecommendationOptionsEstimatedMonthlySavingsValue", + "Tags" + ] + }, + "ExportableLicenseFields":{ + "type":"list", + "member":{"shape":"ExportableLicenseField"} + }, "ExportableVolumeField":{ "type":"string", "enum":[ @@ -2074,6 +2181,48 @@ } } }, + "GetLicenseRecommendationsRequest":{ + "type":"structure", + "members":{ + "resourceArns":{ + "shape":"ResourceArns", + "documentation":"

    The ARN that identifies the Amazon EC2 instance.

    The following is the format of the ARN:

    arn:aws:ec2:region:aws_account_id:instance/instance-id

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to advance to the next page of license recommendations.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of license recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned nextToken value.

    " + }, + "filters":{ + "shape":"LicenseRecommendationFilters", + "documentation":"

    An array of objects to specify a filter that returns a more specific list of license recommendations.

    " + }, + "accountIds":{ + "shape":"AccountIds", + "documentation":"

    The ID of the Amazon Web Services account for which to return license recommendations.

    If your account is the management account of an organization, use this parameter to specify the member account for which you want to return license recommendations.

    Only one account ID can be specified per request.

    " + } + } + }, + "GetLicenseRecommendationsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The token to use to advance to the next page of license recommendations.

    " + }, + "licenseRecommendations":{ + "shape":"LicenseRecommendations", + "documentation":"

    An array of objects that describe license recommendations.

    " + }, + "errors":{ + "shape":"GetRecommendationErrors", + "documentation":"

    An array of objects that describe errors of the request.

    " + } + } + }, "GetRecommendationError":{ "type":"structure", "members":{ @@ -2625,6 +2774,185 @@ }, "LastRefreshTimestamp":{"type":"timestamp"}, "LastUpdatedTimestamp":{"type":"timestamp"}, + "LicenseConfiguration":{ + "type":"structure", + "members":{ + "numberOfCores":{ + "shape":"NumberOfCores", + "documentation":"

    The current number of cores associated with the instance.

    " + }, + "instanceType":{ + "shape":"InstanceType", + "documentation":"

    The instance type used in the license.

    " + }, + "operatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

    The operating system of the instance.

    " + }, + "licenseEdition":{ + "shape":"LicenseEdition", + "documentation":"

    The edition of the license for the application that runs on the instance.

    " + }, + "licenseName":{ + "shape":"LicenseName", + "documentation":"

    The name of the license for the application that runs on the instance.

    " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

    The license type associated with the instance.

    " + }, + "licenseVersion":{ + "shape":"LicenseVersion", + "documentation":"

    The version of the license for the application that runs on the instance.

    " + }, + "metricsSource":{ + "shape":"MetricsSource", + "documentation":"

    The list of metric sources required to generate recommendations for commercial software licenses.

    " + } + }, + "documentation":"

    Describes the configuration of a license for an Amazon EC2 instance.

    " + }, + "LicenseEdition":{ + "type":"string", + "enum":[ + "Enterprise", + "Standard", + "Free", + "NoLicenseEditionFound" + ] + }, + "LicenseFinding":{ + "type":"string", + "enum":[ + "InsufficientMetrics", + "Optimized", + "NotOptimized" + ] + }, + "LicenseFindingReasonCode":{ + "type":"string", + "enum":[ + "InvalidCloudWatchApplicationInsightsSetup", + "CloudWatchApplicationInsightsError", + "LicenseOverprovisioned", + "Optimized" + ] + }, + "LicenseFindingReasonCodes":{ + "type":"list", + "member":{"shape":"LicenseFindingReasonCode"} + }, + "LicenseModel":{ + "type":"string", + "enum":[ + "LicenseIncluded", + "BringYourOwnLicense" + ] + }, + "LicenseName":{ + "type":"string", + "enum":["SQLServer"] + }, + "LicenseRecommendation":{ + "type":"structure", + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

    The ARN that identifies the Amazon EC2 instance.

    " + }, + "accountId":{ + "shape":"AccountId", + "documentation":"

    The Amazon Web Services account ID of the license.

    " + }, + "currentLicenseConfiguration":{ + "shape":"LicenseConfiguration", + "documentation":"

    An object that describes the current configuration of an instance that runs on a license.

    " + }, + "lookbackPeriodInDays":{ + "shape":"LookBackPeriodInDays", + "documentation":"

    The number of days for which utilization metrics were analyzed for an instance that runs on a license.

    " + }, + "lastRefreshTimestamp":{ + "shape":"LastRefreshTimestamp", + "documentation":"

    The timestamp of when the license recommendation was last generated.

    " + }, + "finding":{ + "shape":"LicenseFinding", + "documentation":"

    The finding classification for an instance that runs on a license.

    Findings include:

    • InsufficientMetrics — When Compute Optimizer detects that your CloudWatch Application Insights isn't enabled or is enabled with insufficient permissions.

    • NotOptimized — When Compute Optimizer detects that your EC2 infrastructure isn't using any of the SQL server license features you're paying for, a license is considered not optimized.

    • Optimized — When Compute Optimizer detects that all specifications of your license meet the performance requirements of your workload.

    " + }, + "findingReasonCodes":{ + "shape":"LicenseFindingReasonCodes", + "documentation":"

    The reason for the finding classification for an instance that runs on a license.

    Finding reason codes include:

    • Optimized — All specifications of your license meet the performance requirements of your workload.

    • LicenseOverprovisioned — A license is considered over-provisioned when your license can be downgraded while still meeting the performance requirements of your workload.

    • InvalidCloudWatchApplicationInsightsSetup — CloudWatch Application Insights isn't configured properly.

    • CloudWatchApplicationInsightsError — There is a CloudWatch Application Insights error.

    " + }, + "licenseRecommendationOptions":{ + "shape":"LicenseRecommendationOptions", + "documentation":"

    An array of objects that describe the license recommendation options.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    A list of tags assigned to an EC2 instance.

    " + } + }, + "documentation":"

    Describes a license recommendation for an EC2 instance.

    " + }, + "LicenseRecommendationFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"LicenseRecommendationFilterName", + "documentation":"

    The name of the filter.

    Specify Finding to return recommendations with a specific finding classification.

    Specify FindingReasonCode to return recommendations with a specific finding reason code.

    You can filter your license recommendations by tag:key and tag-key tags.

    A tag:key is a key and value combination of a tag assigned to your license recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all license recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    A tag-key is the key of a tag assigned to your license recommendations. Use this filter to find all of your license recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your license recommendations with a tag key value of Owner or without any tag keys assigned.

    " + }, + "values":{ + "shape":"FilterValues", + "documentation":"

    The value of the filter.

    The valid values for this parameter are as follows, depending on what you specify for the name parameter:

    • If you specify the name parameter as Finding, then specify Optimized, NotOptimized, or InsufficientMetrics.

    • If you specify the name parameter as FindingReasonCode, then specify Optimized, LicenseOverprovisioned, InvalidCloudWatchApplicationInsightsSetup, or CloudWatchApplicationInsightsError.

    " + } + }, + "documentation":"

    Describes a filter that returns a more specific list of license recommendations. Use this filter with the GetLicenseRecommendations action.

    " + }, + "LicenseRecommendationFilterName":{ + "type":"string", + "enum":[ + "Finding", + "FindingReasonCode", + "LicenseName" + ] + }, + "LicenseRecommendationFilters":{ + "type":"list", + "member":{"shape":"LicenseRecommendationFilter"} + }, + "LicenseRecommendationOption":{ + "type":"structure", + "members":{ + "rank":{ + "shape":"Rank", + "documentation":"

    The rank of the license recommendation option.

    The top recommendation option is ranked as 1.

    " + }, + "operatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

    The operating system of a license recommendation option.

    " + }, + "licenseEdition":{ + "shape":"LicenseEdition", + "documentation":"

    The recommended edition of the license for the application that runs on the instance.

    " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

    The recommended license type associated with the instance.

    " + }, + "savingsOpportunity":{"shape":"SavingsOpportunity"} + }, + "documentation":"

    Describes the recommendation options for licenses.

    " + }, + "LicenseRecommendationOptions":{ + "type":"list", + "member":{"shape":"LicenseRecommendationOption"} + }, + "LicenseRecommendations":{ + "type":"list", + "member":{"shape":"LicenseRecommendation"} + }, + "LicenseVersion":{"type":"string"}, "LimitExceededException":{ "type":"structure", "members":{ @@ -2682,6 +3010,25 @@ "NETWORK_PACKETS_OUT_PER_SECOND" ] }, + "MetricProviderArn":{"type":"string"}, + "MetricSource":{ + "type":"structure", + "members":{ + "provider":{ + "shape":"MetricSourceProvider", + "documentation":"

    The name of the metric source provider.

    " + }, + "providerArn":{ + "shape":"MetricProviderArn", + "documentation":"

    The ARN of the metric source provider.

    " + } + }, + "documentation":"

    The list of metric sources required to generate recommendations for commercial software licenses.

    " + }, + "MetricSourceProvider":{ + "type":"string", + "enum":["CloudWatchApplicationInsights"] + }, "MetricStatistic":{ "type":"string", "enum":[ @@ -2694,6 +3041,10 @@ "type":"list", "member":{"shape":"MetricValue"} }, + "MetricsSource":{ + "type":"list", + "member":{"shape":"MetricSource"} + }, "MigrationEffort":{ "type":"string", "enum":[ @@ -2717,8 +3068,10 @@ "NullableCpu":{"type":"integer"}, "NullableMemory":{"type":"integer"}, "NullableMemoryReservation":{"type":"integer"}, + "NumberOfCores":{"type":"integer"}, "NumberOfInvocations":{"type":"long"}, "NumberOfMemberAccountsOptedIn":{"type":"integer"}, + "OperatingSystem":{"type":"string"}, "OptInRequiredException":{ "type":"structure", "members":{ @@ -2941,7 +3294,8 @@ "AutoScalingGroup", "EbsVolume", "LambdaFunction", - "EcsService" + "EcsService", + "License" ] }, "RecommendationSources":{ @@ -3006,6 +3360,10 @@ "member":{"shape":"RecommendedOptionProjectedMetric"} }, "ResourceArn":{"type":"string"}, + "ResourceArns":{ + "type":"list", + "member":{"shape":"ResourceArn"} + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -3023,7 +3381,8 @@ "EbsVolume", "LambdaFunction", "NotApplicable", - "EcsService" + "EcsService", + "License" ] }, "RootVolume":{"type":"boolean"}, diff --git a/services/config/pom.xml b/services/config/pom.xml index 650239e3d8a..915a4aa5a37 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/config/src/main/resources/codegen-resources/customization.config b/services/config/src/main/resources/codegen-resources/customization.config index 9a537e0cc59..d113522aaf3 100644 --- a/services/config/src/main/resources/codegen-resources/customization.config +++ b/services/config/src/main/resources/codegen-resources/customization.config @@ -16,7 +16,7 @@ "getComplianceSummaryByResourceType", 
"getDiscoveredResourceCounts" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "startConfigRulesEvaluation" ] } diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 1470a73a512..4d5ae540a0f 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connectcampaigns/pom.xml b/services/connectcampaigns/pom.xml index c90fe4a77fb..8f7e4254204 100644 --- a/services/connectcampaigns/pom.xml +++ b/services/connectcampaigns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT connectcampaigns AWS Java SDK :: Services :: Connect Campaigns diff --git a/services/connectcases/pom.xml b/services/connectcases/pom.xml index afaa98d72c9..e8ab6dce29e 100644 --- a/services/connectcases/pom.xml +++ b/services/connectcases/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT connectcases AWS Java SDK :: Services :: Connect Cases diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index bdb8ac1a52c..7e6690ddf09 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index 0db6fa4b853..4fd993d737b 100644 --- a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/controltower/pom.xml b/services/controltower/pom.xml index 99f800c9bb0..f6f1efe3f42 100644 --- a/services/controltower/pom.xml +++ 
b/services/controltower/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT controltower AWS Java SDK :: Services :: Control Tower diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index 07a3642acc5..2c63bae3aca 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costandusagereport/src/main/resources/codegen-resources/customization.config b/services/costandusagereport/src/main/resources/codegen-resources/customization.config index 6d58177dfd5..bb24e28984a 100644 --- a/services/costandusagereport/src/main/resources/codegen-resources/customization.config +++ b/services/costandusagereport/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "describeReportDefinitions" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deleteReportDefinition" ] } diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index d0e7f59dbd1..3d44bb6e348 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 costexplorer diff --git a/services/costexplorer/src/main/resources/codegen-resources/customization.config b/services/costexplorer/src/main/resources/codegen-resources/customization.config index a8254fa7e84..09691b4543c 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/customization.config +++ b/services/costexplorer/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["getCostAndUsage"] + "excludedSimpleMethods" : ["getCostAndUsage"] } diff --git 
a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json index e4c99e99633..0a970cbf0e7 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,342 +115,302 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "stringEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "PartitionResult" }, - "aws" + "name" ] }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": 
"UseFIPS" - }, - false - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-east-1" - } - ] + "ref": "UseDualStack" }, - "headers": {} - }, - "type": "endpoint" + false + ] + } + ], + "endpoint": { + "url": "https://ce.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "us-east-1" + } + ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "PartitionResult" }, - "aws-cn" + "name" ] }, + "aws-cn" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://ce.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "ce", + "signingRegion": "cn-northwest-1" } - ], - "endpoint": { - "url": "https://ce.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "cn-northwest-1" - } - ] + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "headers": {} - }, - "type": "endpoint" + true + ] }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" 
+ }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } 
- ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + 
"conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/costexplorer/src/main/resources/codegen-resources/service-2.json b/services/costexplorer/src/main/resources/codegen-resources/service-2.json index 37fbde371ed..3a4839af801 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/service-2.json +++ b/services/costexplorer/src/main/resources/codegen-resources/service-2.json @@ -308,7 +308,7 @@ {"shape":"LimitExceededException"}, {"shape":"DataUnavailableException"} ], - "documentation":"

    Retrieves the details for a Savings Plan recommendation. These details include the hourly data-points that construct the new cost, coverage, and utilization charts.

    " + "documentation":"

    Retrieves the details for a Savings Plan recommendation. These details include the hourly data-points that construct the cost, coverage, and utilization charts.

    " }, "GetSavingsPlansCoverage":{ "name":"GetSavingsPlansCoverage", @@ -828,6 +828,14 @@ "Status":{ "shape":"CostAllocationTagStatus", "documentation":"

    The status of a cost allocation tag.

    " + }, + "LastUpdatedDate":{ + "shape":"ZonedDateTime", + "documentation":"

    The last date that the tag was either activated or deactivated.

    " + }, + "LastUsedDate":{ + "shape":"ZonedDateTime", + "documentation":"

    The last month that the tag was used on an Amazon Web Services resource.

    " } }, "documentation":"

    The cost allocation tag structure. This includes detailed metadata for the CostAllocationTag object.

    " diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index 3c0eef5654b..cf4f3b389ab 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 69434417dbf..7a870eff30e 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databasemigration/src/main/resources/codegen-resources/customization.config b/services/databasemigration/src/main/resources/codegen-resources/customization.config index 76e134825e2..c746548fecc 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/customization.config +++ b/services/databasemigration/src/main/resources/codegen-resources/customization.config @@ -13,7 +13,7 @@ "describeReplicationSubnetGroups", "describeReplicationTasks" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeReplicationTaskAssessmentResults" ] } diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index 91a9ed29312..56aa5eda6b9 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index a96df35d8c9..d0033c99699 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git 
a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index f17c62eba44..871f0bb482f 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index 8ba289c3d00..53edf822538 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/dax/pom.xml b/services/dax/pom.xml index e60dd7a979e..15863ef10ee 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/detective/pom.xml b/services/detective/pom.xml index d0a03d0cbe5..12e26889cf9 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/detective/src/main/resources/codegen-resources/service-2.json b/services/detective/src/main/resources/codegen-resources/service-2.json index 297e16c60ec..0af98aa3361 100644 --- a/services/detective/src/main/resources/codegen-resources/service-2.json +++ b/services/detective/src/main/resources/codegen-resources/service-2.json @@ -754,12 +754,14 @@ "type":"string", "max":64, "min":1, - "pattern":"^.+@(?:(?:(?!-)[A-Za-z0-9-]{1,62})?[A-Za-z0-9]{1}\\.)+[A-Za-z]{2,63}$" + "pattern":"^.+@(?:(?:(?!-)[A-Za-z0-9-]{1,62})?[A-Za-z0-9]{1}\\.)+[A-Za-z]{2,63}$", + "sensitive":true }, "EmailMessage":{ "type":"string", "max":1000, - "min":1 + "min":1, + "sensitive":true }, "EnableOrganizationAdminAccountRequest":{ "type":"structure", diff 
--git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 1b5bebe6309..2c0df77fa5b 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devicefarm/src/main/resources/codegen-resources/customization.config b/services/devicefarm/src/main/resources/codegen-resources/customization.config index 9672f49afb1..158dab87a46 100644 --- a/services/devicefarm/src/main/resources/codegen-resources/customization.config +++ b/services/devicefarm/src/main/resources/codegen-resources/customization.config @@ -10,7 +10,7 @@ "listOfferings", "listProjects" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "purchaseOffering", "renewOffering", "listVPCEConfigurations" diff --git a/services/devopsguru/pom.xml b/services/devopsguru/pom.xml index 6b5866e4e08..d3bde2e8aad 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index 82438467b1c..42d4fe4baf2 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directconnect/src/main/resources/codegen-resources/customization.config b/services/directconnect/src/main/resources/codegen-resources/customization.config index dcf4f21ff80..4afc46a926a 100644 --- a/services/directconnect/src/main/resources/codegen-resources/customization.config +++ b/services/directconnect/src/main/resources/codegen-resources/customization.config @@ -8,7 +8,7 @@ "describeVirtualGateways", "describeVirtualInterfaces" ], - 
"blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createBGPPeer", "deleteBGPPeer", "describeDirectConnectGatewayAttachments", diff --git a/services/directory/pom.xml b/services/directory/pom.xml index d3f0027ee4f..45b373aea00 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index fd66c0bbc4e..bc466c856e3 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index ea2df4c1a78..083269505e9 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/docdbelastic/pom.xml b/services/docdbelastic/pom.xml index 6a89affe5b6..787cc064a1b 100644 --- a/services/docdbelastic/pom.xml +++ b/services/docdbelastic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT docdbelastic AWS Java SDK :: Services :: Doc DB Elastic diff --git a/services/drs/pom.xml b/services/drs/pom.xml index bb1c4a81a46..0a009b60e95 100644 --- a/services/drs/pom.xml +++ b/services/drs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT drs AWS Java SDK :: Services :: Drs diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index a8bb59e1249..2455da24112 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml 
index fcc647fe8fa..0c7930520f1 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index d40cb3d9c82..a293b33f09a 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/customization.config b/services/ec2/src/main/resources/codegen-resources/customization.config index 1c08d1cf45f..61287b40d6e 100644 --- a/services/ec2/src/main/resources/codegen-resources/customization.config +++ b/services/ec2/src/main/resources/codegen-resources/customization.config @@ -330,7 +330,7 @@ ] } }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "acceptVpcPeeringConnection", "authorizeSecurityGroupIngress", "cancelImportTask", diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json b/services/ec2/src/main/resources/codegen-resources/service-2.json index 330d367716e..14c861e519a 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -1075,7 +1075,7 @@ }, "input":{"shape":"CreateSubnetCidrReservationRequest"}, "output":{"shape":"CreateSubnetCidrReservationResult"}, - "documentation":"

    Creates a subnet CIDR reservation. For information about subnet CIDR reservations, see Subnet CIDR reservations in the Amazon Virtual Private Cloud User Guide.

    " + "documentation":"

    Creates a subnet CIDR reservation. For more information, see Subnet CIDR reservations in the Amazon Virtual Private Cloud User Guide and Assign prefixes to network interfaces in the Amazon Elastic Compute Cloud User Guide.

    " }, "CreateTags":{ "name":"CreateTags", @@ -1549,6 +1549,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteKeyPairRequest"}, + "output":{"shape":"DeleteKeyPairResult"}, "documentation":"

    Deletes the specified key pair, by removing the public key from Amazon EC2.

    " }, "DeleteLaunchTemplate":{ @@ -1569,7 +1570,7 @@ }, "input":{"shape":"DeleteLaunchTemplateVersionsRequest"}, "output":{"shape":"DeleteLaunchTemplateVersionsResult"}, - "documentation":"

    Deletes one or more versions of a launch template. You cannot delete the default version of a launch template; you must first assign a different version as the default. If the default version is the only version for the launch template, you must delete the entire launch template using DeleteLaunchTemplate.

    " + "documentation":"

    Deletes one or more versions of a launch template.

    You can't delete the default version of a launch template; you must first assign a different version as the default. If the default version is the only version for the launch template, you must delete the entire launch template using DeleteLaunchTemplate.

    You can delete up to 200 launch template versions in a single request. To delete more than 200 versions in a single request, use DeleteLaunchTemplate, which deletes the launch template and all of its versions.

    For more information, see Delete a launch template version in the EC2 User Guide.

    " }, "DeleteLocalGatewayRoute":{ "name":"DeleteLocalGatewayRoute", @@ -12288,7 +12289,7 @@ }, "LogFormat":{ "shape":"String", - "documentation":"

    The fields to include in the flow log record. List the fields in the order in which they should appear. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must include at least one field. For more information about the available fields, see Flow log records in the Amazon VPC User Guide or Transit Gateway Flow Log records in the Amazon Web Services Transit Gateway Guide.

    Specify the fields using the ${field-id} format, separated by spaces. For the CLI, surround this parameter value with single quotes on Linux or double quotes on Windows.

    " + "documentation":"

    The fields to include in the flow log record. List the fields in the order in which they should appear. If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must include at least one field. For more information about the available fields, see Flow log records in the Amazon VPC User Guide or Transit Gateway Flow Log records in the Amazon Web Services Transit Gateway Guide.

    Specify the fields using the ${field-id} format, separated by spaces.

    " }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -14042,7 +14043,7 @@ }, "ReservationType":{ "shape":"SubnetCidrReservationType", - "documentation":"

    The type of reservation.

    The following are valid values:

    • prefix: The Amazon EC2 Prefix Delegation feature assigns the IP addresses to network interfaces that are associated with an instance. For information about Prefix Delegation, see Prefix Delegation for Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.

    • explicit: You manually assign the IP addresses to resources that reside in your subnet.

    " + "documentation":"

    The type of reservation. The reservation type determines how the reserved IP addresses are assigned to resources.

    • prefix - Amazon Web Services assigns the reserved IP addresses to network interfaces.

    • explicit - You assign the reserved IP addresses to network interfaces.

    " }, "Description":{ "shape":"String", @@ -14285,7 +14286,7 @@ }, "PacketLength":{ "shape":"Integer", - "documentation":"

    The number of bytes in each packet to mirror. These are bytes after the VXLAN header. Do not specify this parameter when you want to mirror the entire packet. To mirror a subset of the packet, set this to the length (in bytes) that you want to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target.

    If you do not want to mirror the entire packet, use the PacketLength parameter to specify the number of bytes in each packet to mirror.

    " + "documentation":"

    The number of bytes in each packet to mirror. These are bytes after the VXLAN header. Do not specify this parameter when you want to mirror the entire packet. To mirror a subset of the packet, set this to the length (in bytes) that you want to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target.

    If you do not want to mirror the entire packet, use the PacketLength parameter to specify the number of bytes in each packet to mirror.

    For sessions with Network Load Balancer (NLB) Traffic Mirror targets the default PacketLength will be set to 8500. Valid values are 1-8500. Setting a PacketLength greater than 8500 will result in an error response.

    " }, "SessionNumber":{ "shape":"Integer", @@ -15342,12 +15343,12 @@ }, "SubnetIds":{ "shape":"VpcEndpointSubnetIdList", - "documentation":"

    (Interface and Gateway Load Balancer endpoints) The IDs of the subnets in which to create an endpoint network interface. For a Gateway Load Balancer endpoint, you can specify only one subnet.

    ", + "documentation":"

    (Interface and Gateway Load Balancer endpoints) The IDs of the subnets in which to create endpoint network interfaces. For a Gateway Load Balancer endpoint, you can specify only one subnet.

    ", "locationName":"SubnetId" }, "SecurityGroupIds":{ "shape":"VpcEndpointSecurityGroupIdList", - "documentation":"

    (Interface endpoint) The IDs of the security groups to associate with the endpoint network interface. If this parameter is not specified, we use the default security group for the VPC.

    ", + "documentation":"

    (Interface endpoint) The IDs of the security groups to associate with the endpoint network interfaces. If this parameter is not specified, we use the default security group for the VPC.

    ", "locationName":"SecurityGroupId" }, "IpAddressType":{ @@ -15370,6 +15371,11 @@ "shape":"TagSpecificationList", "documentation":"

    The tags to associate with the endpoint.

    ", "locationName":"TagSpecification" + }, + "SubnetConfigurations":{ + "shape":"SubnetConfigurationsList", + "documentation":"

    The subnet configurations for the endpoint.

    ", + "locationName":"SubnetConfiguration" } } }, @@ -16427,6 +16433,21 @@ } } }, + "DeleteKeyPairResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "documentation":"

    Is true if the request succeeds, and an error otherwise.

    ", + "locationName":"return" + }, + "KeyPairId":{ + "shape":"String", + "documentation":"

    The ID of the key pair.

    ", + "locationName":"keyPairId" + } + } + }, "DeleteLaunchTemplateRequest":{ "type":"structure", "members":{ @@ -16472,7 +16493,7 @@ }, "Versions":{ "shape":"VersionStringList", - "documentation":"

    The version numbers of one or more launch template versions to delete.

    ", + "documentation":"

    The version numbers of one or more launch template versions to delete. You can specify up to 200 launch template version numbers.

    ", "locationName":"LaunchTemplateVersion" } } @@ -20339,7 +20360,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

    The filters.

    • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

    • architecture - The instance architecture (i386 | x86_64 | arm64).

    • availability-zone - The Availability Zone of the instance.

    • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z.

    • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

    • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

    • block-device-mapping.volume-id - The volume ID of the EBS volume.

    • capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched.

    • client-token - The idempotency token you provided when you launched the instance.

    • dns-name - The public DNS name of the instance.

    • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

    • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

    • hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors.

    • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

    • image-id - The ID of the image used to launch the instance.

    • instance-id - The ID of the instance.

    • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

    • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-type - The type of instance (for example, t2.micro).

    • instance.group-id - The ID of the security group for the instance.

    • instance.group-name - The name of the security group for the instance.

    • ip-address - The public IPv4 address of the instance.

    • kernel-id - The kernel ID.

    • key-name - The name of the key pair used when the instance was launched.

    • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

    • launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day.

    • metadata-options.http-tokens - The metadata request authorization state (optional | required)

    • metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64)

    • metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled)

    • metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled)

    • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

    • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

    • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

    • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

    • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

    • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

    • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

    • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

    • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

    • network-interface.attachment.attachment-id - The ID of the interface attachment.

    • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

    • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • network-interface.attachment.device-index - The device index to which the network interface is attached.

    • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

    • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

    • network-interface.availability-zone - The Availability Zone for the network interface.

    • network-interface.description - The description of the network interface.

    • network-interface.group-id - The ID of a security group associated with the network interface.

    • network-interface.group-name - The name of a security group associated with the network interface.

    • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

    • network-interface.mac-address - The MAC address of the network interface.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.owner-id - The ID of the owner of the network interface.

    • network-interface.private-dns-name - The private DNS name of the network interface.

    • network-interface.requester-id - The requester ID for the network interface.

    • network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services.

    • network-interface.status - The status of the network interface (available) | in-use).

    • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • network-interface.subnet-id - The ID of the subnet for the network interface.

    • network-interface.vpc-id - The ID of the VPC for the network interface.

    • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

    • owner-id - The Amazon Web Services account ID of the instance owner.

    • placement-group-name - The name of the placement group for the instance.

    • placement-partition-number - The partition in which the instance is located.

    • platform - The platform. To list only Windows instances, use windows.

    • private-dns-name - The private IPv4 DNS name of the instance.

    • private-ip-address - The private IPv4 address of the instance.

    • product-code - The product code associated with the AMI used to launch the instance.

    • product-code.type - The type of product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

    • root-device-name - The device name of the root device volume (for example, /dev/sda1).

    • root-device-type - The type of the root device volume (ebs | instance-store).

    • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

    • spot-instance-request-id - The ID of the Spot Instance request.

    • state-reason-code - The reason code for the state change.

    • state-reason-message - A message that describes the state change.

    • subnet-id - The ID of the subnet for the instance.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    • tenancy - The tenancy of an instance (dedicated | default | host).

    • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

    • vpc-id - The ID of the VPC that the instance is running in.

    ", + "documentation":"

    The filters.

    • affinity - The affinity setting for an instance running on a Dedicated Host (default | host).

    • architecture - The instance architecture (i386 | x86_64 | arm64).

    • availability-zone - The Availability Zone of the instance.

    • block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z.

    • block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination.

    • block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh).

    • block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached).

    • block-device-mapping.volume-id - The volume ID of the EBS volume.

    • boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred).

    • capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched.

    • capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none).

    • capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation.

    • capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group.

    • client-token - The idempotency token you provided when you launched the instance.

    • current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi).

    • dns-name - The public DNS name of the instance.

    • ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O.

    • ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA.

    • enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.

    • hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation.

    • host-id - The ID of the Dedicated Host on which the instance is running, if applicable.

    • hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors.

    • iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN.

    • iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID.

    • iam-instance-profile.name - The instance profile associated with the instance. Specified as an name.

    • image-id - The ID of the image used to launch the instance.

    • instance-id - The ID of the instance.

    • instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled Instance (spot | scheduled).

    • instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

    • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).

    • instance-type - The type of instance (for example, t2.micro).

    • instance.group-id - The ID of the security group for the instance.

    • instance.group-name - The name of the security group for the instance.

    • ip-address - The public IPv4 address of the instance.

    • ipv6-address - The IPv6 address of the instance.

    • kernel-id - The kernel ID.

    • key-name - The name of the key pair used when the instance was launched.

    • launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on).

    • launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day.

    • license-pool -

    • maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default).

    • metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled)

    • metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled).

    • metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled).

    • metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64)

    • metadata-options.http-tokens - The metadata request authorization state (optional | required)

    • metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled)

    • metadata-options.state - The state of the metadata option changes (pending | applied).

    • monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled).

    • network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address.

    • network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface.

    • network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface.

    • network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface.

    • network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

    • network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

    • network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

    • network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

    • network-interface.attachment.attachment-id - The ID of the interface attachment.

    • network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached.

    • network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • network-interface.attachment.device-index - The device index to which the network interface is attached.

    • network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • network-interface.attachment.attach-time - The time that the network interface was attached to an instance.

    • network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated.

    • network-interface.availability-zone - The Availability Zone for the network interface.

    • network-interface.description - The description of the network interface.

    • network-interface.group-id - The ID of a security group associated with the network interface.

    • network-interface.group-name - The name of a security group associated with the network interface.

    • network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface.

    • network-interface.mac-address - The MAC address of the network interface.

    • network-interface.network-interface-id - The ID of the network interface.

    • network-interface.owner-id - The ID of the owner of the network interface.

    • network-interface.private-dns-name - The private DNS name of the network interface.

    • network-interface.requester-id - The requester ID for the network interface.

    • network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services.

    • network-interface.status - The status of the network interface (available | in-use).

    • network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • network-interface.subnet-id - The ID of the subnet for the network interface.

    • network-interface.vpc-id - The ID of the VPC for the network interface.

    • outpost-arn - The Amazon Resource Name (ARN) of the Outpost.

    • owner-id - The Amazon Web Services account ID of the instance owner.

    • placement-group-name - The name of the placement group for the instance.

    • placement-partition-number - The partition in which the instance is located.

    • platform - The platform. To list only Windows instances, use windows.

    • platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web).

    • private-dns-name - The private IPv4 DNS name of the instance.

    • private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records.

    • private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.

    • private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name).

    • private-ip-address - The private IPv4 address of the instance.

    • product-code - The product code associated with the AMI used to launch the instance.

    • product-code.type - The type of product code (devpay | marketplace).

    • ramdisk-id - The RAM disk ID.

    • reason - The reason for the current state of the instance (for example, shows \"User Initiated [date]\" when you stop or terminate the instance). Similar to the state-reason-code filter.

    • requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    • reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID.

    • root-device-name - The device name of the root device volume (for example, /dev/sda1).

    • root-device-type - The type of the root device volume (ebs | instance-store).

    • source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC.

    • spot-instance-request-id - The ID of the Spot Instance request.

    • state-reason-code - The reason code for the state change.

    • state-reason-message - A message that describes the state change.

    • subnet-id - The ID of the subnet for the instance.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    • tenancy - The tenancy of an instance (dedicated | default | host).

    • tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0).

    • usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202).

    • usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z.

    • virtualization-type - The virtualization type of the instance (paravirtual | hvm).

    • vpc-id - The ID of the VPC that the instance is running in.

    ", "locationName":"Filter" }, "InstanceIds":{ @@ -30124,7 +30145,7 @@ "locationName":"instanceId" }, "PasswordData":{ - "shape":"String", + "shape":"PasswordData", "documentation":"

    The password of the instance. Returns an empty string if the password is not available.

    ", "locationName":"passwordData" }, @@ -35341,7 +35362,23 @@ "m7i-flex.xlarge", "m7i-flex.2xlarge", "m7i-flex.4xlarge", - "m7i-flex.8xlarge" + "m7i-flex.8xlarge", + "m7a.medium", + "m7a.large", + "m7a.xlarge", + "m7a.2xlarge", + "m7a.4xlarge", + "m7a.8xlarge", + "m7a.12xlarge", + "m7a.16xlarge", + "m7a.24xlarge", + "m7a.32xlarge", + "m7a.48xlarge", + "m7a.metal-48xl", + "hpc7a.12xlarge", + "hpc7a.24xlarge", + "hpc7a.48xlarge", + "hpc7a.96xlarge" ] }, "InstanceTypeHypervisor":{ @@ -41138,7 +41175,7 @@ }, "PacketLength":{ "shape":"Integer", - "documentation":"

    The number of bytes in each packet to mirror. These are bytes after the VXLAN header. To mirror a subset, set this to the length (in bytes) to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target. Do not specify this parameter when you want to mirror the entire packet.

    " + "documentation":"

    The number of bytes in each packet to mirror. These are bytes after the VXLAN header. To mirror a subset, set this to the length (in bytes) to mirror. For example, if you set this value to 100, then the first 100 bytes that meet the filter criteria are copied to the target. Do not specify this parameter when you want to mirror the entire packet.

    For sessions with Network Load Balancer (NLB) traffic mirror targets, the default PacketLength will be set to 8500. Valid values are 1-8500. Setting a PacketLength greater than 8500 will result in an error response.

    " }, "SessionNumber":{ "shape":"Integer", @@ -41856,12 +41893,12 @@ }, "AddSecurityGroupIds":{ "shape":"VpcEndpointSecurityGroupIdList", - "documentation":"

    (Interface endpoint) The IDs of the security groups to associate with the network interface.

    ", + "documentation":"

    (Interface endpoint) The IDs of the security groups to associate with the endpoint network interfaces.

    ", "locationName":"AddSecurityGroupId" }, "RemoveSecurityGroupIds":{ "shape":"VpcEndpointSecurityGroupIdList", - "documentation":"

    (Interface endpoint) The IDs of the security groups to disassociate from the network interface.

    ", + "documentation":"

    (Interface endpoint) The IDs of the security groups to disassociate from the endpoint network interfaces.

    ", "locationName":"RemoveSecurityGroupId" }, "IpAddressType":{ @@ -41875,6 +41912,11 @@ "PrivateDnsEnabled":{ "shape":"Boolean", "documentation":"

    (Interface endpoint) Indicates whether a private hosted zone is associated with the VPC.

    " + }, + "SubnetConfigurations":{ + "shape":"SubnetConfigurationsList", + "documentation":"

    The subnet configurations for the endpoint.

    ", + "locationName":"SubnetConfiguration" } } }, @@ -44056,6 +44098,10 @@ "monthly" ] }, + "PasswordData":{ + "type":"string", + "sensitive":true + }, "PathComponent":{ "type":"structure", "members":{ @@ -49396,13 +49442,21 @@ "locationName":"uploadPolicy" }, "UploadPolicySignature":{ - "shape":"String", + "shape":"S3StorageUploadPolicySignature", "documentation":"

    The signature of the JSON document.

    ", "locationName":"uploadPolicySignature" } }, "documentation":"

    Describes the storage parameters for Amazon S3 and Amazon S3 buckets for an instance store-backed AMI.

    " }, + "S3StorageUploadPolicy":{ + "type":"string", + "sensitive":true + }, + "S3StorageUploadPolicySignature":{ + "type":"string", + "sensitive":true + }, "SSEType":{ "type":"string", "enum":[ @@ -52493,6 +52547,31 @@ "explicit" ] }, + "SubnetConfiguration":{ + "type":"structure", + "members":{ + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

    The ID of the subnet.

    " + }, + "Ipv4":{ + "shape":"String", + "documentation":"

    The IPv4 address to assign to the endpoint network interface in the subnet. You must provide an IPv4 address if the VPC endpoint supports IPv4.

    If you specify an IPv4 address when modifying a VPC endpoint, we replace the existing endpoint network interface with a new endpoint network interface with this IP address. This process temporarily disconnects the subnet and the VPC endpoint.

    " + }, + "Ipv6":{ + "shape":"String", + "documentation":"

    The IPv6 address to assign to the endpoint network interface in the subnet. You must provide an IPv6 address if the VPC endpoint supports IPv6.

    If you specify an IPv6 address when modifying a VPC endpoint, we replace the existing endpoint network interface with a new endpoint network interface with this IP address. This process temporarily disconnects the subnet and the VPC endpoint.

    " + } + }, + "documentation":"

    Describes the configuration of a subnet for a VPC endpoint.

    " + }, + "SubnetConfigurationsList":{ + "type":"list", + "member":{ + "shape":"SubnetConfiguration", + "locationName":"item" + } + }, "SubnetId":{"type":"string"}, "SubnetIdStringList":{ "type":"list", diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index ef5551eed22..f488f3e48b7 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index 852c1af8473..0419eb4b716 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index 14440342a65..adac1768433 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index 821bac01625..bcf3aade0d0 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/customization.config b/services/ecs/src/main/resources/codegen-resources/customization.config index 58e305ca06b..b0f923be73f 100644 --- a/services/ecs/src/main/resources/codegen-resources/customization.config +++ b/services/ecs/src/main/resources/codegen-resources/customization.config @@ -10,7 +10,7 @@ "listTaskDefinitionFamilies", "listTaskDefinitions" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "discoverPollEndpoint", 
"registerContainerInstance", "submitContainerStateChange", diff --git a/services/efs/pom.xml b/services/efs/pom.xml index f55f3d96d77..af015e5d699 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/efs/src/main/resources/codegen-resources/customization.config b/services/efs/src/main/resources/codegen-resources/customization.config index d2fe9be19d6..ea0753f0e30 100644 --- a/services/efs/src/main/resources/codegen-resources/customization.config +++ b/services/efs/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "describeFileSystems" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeMountTargets" ] } diff --git a/services/eks/pom.xml b/services/eks/pom.xml index ad9730b620f..1d8f4a07b03 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index dcb5c9d4b09..eb672e59422 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticache/src/main/resources/codegen-resources/customization.config b/services/elasticache/src/main/resources/codegen-resources/customization.config index ebe2ffd7cdf..8f466b4cef8 100644 --- a/services/elasticache/src/main/resources/codegen-resources/customization.config +++ b/services/elasticache/src/main/resources/codegen-resources/customization.config @@ -10,7 +10,7 @@ "describeReservedCacheNodesOfferings", "describeSnapshots" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ 
"describeCacheSecurityGroups", "listAllowedNodeTypeModifications" ] diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index aba0b7f911e..fa1994843ad 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config b/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config index b2845ceed17..f0f2660d273 100644 --- a/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config +++ b/services/elasticbeanstalk/src/main/resources/codegen-resources/customization.config @@ -17,7 +17,7 @@ ] } }, - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "abortEnvironmentUpdate", "composeEnvironments", "deletePlatformVersion", diff --git a/services/elasticinference/pom.xml b/services/elasticinference/pom.xml index a04cda02a18..dde82423b2a 100644 --- a/services/elasticinference/pom.xml +++ b/services/elasticinference/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT elasticinference AWS Java SDK :: Services :: Elastic Inference diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 1a5e796e3d5..7ed4b27813b 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index eb38fefa86e..6c8d25ca977 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/customization.config b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/customization.config index 3d22a294f4b..45f2d8ee516 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/customization.config +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ "describeSSLPolicies", "describeTargetGroups" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeRules", "describeListeners" ] diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index 42e690de0b2..b1c95c5186e 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index adc295ead0a..0e384fb95e7 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/emr/pom.xml b/services/emr/pom.xml index e6179ac876c..1ca2e37b28d 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emr/src/main/resources/codegen-resources/customization.config b/services/emr/src/main/resources/codegen-resources/customization.config index 1bc9a6a3427..1e066e00e14 100644 --- a/services/emr/src/main/resources/codegen-resources/customization.config +++ 
b/services/emr/src/main/resources/codegen-resources/customization.config @@ -14,7 +14,7 @@ ] } }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "cancelSteps", "modifyInstanceGroups", "describeJobFlows" diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index 6b3b3e6bda0..b524b12ab34 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/emrserverless/pom.xml b/services/emrserverless/pom.xml index 7e2c2ba2bf3..136fe460fd4 100644 --- a/services/emrserverless/pom.xml +++ b/services/emrserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT emrserverless AWS Java SDK :: Services :: EMR Serverless diff --git a/services/entityresolution/pom.xml b/services/entityresolution/pom.xml index 716d634e766..ae36115ee08 100644 --- a/services/entityresolution/pom.xml +++ b/services/entityresolution/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT entityresolution AWS Java SDK :: Services :: Entity Resolution diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index a45f5847df6..d11a84d24d6 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/evidently/pom.xml b/services/evidently/pom.xml index acc25afde89..eb1db26e8c8 100644 --- a/services/evidently/pom.xml +++ b/services/evidently/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT evidently AWS Java SDK :: Services :: Evidently diff --git a/services/finspace/pom.xml b/services/finspace/pom.xml index ec71cb652e9..4227d81d21e 100644 --- 
a/services/finspace/pom.xml +++ b/services/finspace/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT finspace AWS Java SDK :: Services :: Finspace diff --git a/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json index 5299e3d75a8..2ae22d4be88 100644 --- a/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/finspace/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { 
"fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://finspace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://finspace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - 
"conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://finspace-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://finspace-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://finspace.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://finspace.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://finspace.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": 
"DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://finspace.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/finspace/src/main/resources/codegen-resources/service-2.json b/services/finspace/src/main/resources/codegen-resources/service-2.json index e0d63a365a5..79383669015 100644 --- a/services/finspace/src/main/resources/codegen-resources/service-2.json +++ b/services/finspace/src/main/resources/codegen-resources/service-2.json @@ -528,6 +528,7 @@ {"shape":"AccessDeniedException"}, {"shape":"LimitExceededException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], "documentation":"

    Updates the databases mounted on a kdb cluster, which includes the changesetId and all the dbPaths to be cached. This API does not allow you to change a database name or add a database if you created a cluster without one.

    Using this API you can point a cluster to a different changeset and modify a list of partitions being cached.

    " @@ -610,6 +611,7 @@ "AccessDeniedException":{ "type":"structure", "members":{ + "message":{"shape":"errorMessage"} }, "documentation":"

    You do not have sufficient access to perform this action.

    ", "error":{"httpStatusCode":403}, @@ -680,7 +682,7 @@ "documentation":"

    The number of instances running in a cluster.

    " } }, - "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

    " + "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, and number of instances.

    " }, "ChangeRequest":{ "type":"structure", @@ -951,7 +953,7 @@ }, "capacityConfiguration":{ "shape":"CapacityConfiguration", - "documentation":"

    A structure for the metadata of a cluster. It includes information about like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

    " + "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, and number of instances.

    " }, "releaseLabel":{ "shape":"ReleaseLabel", @@ -1036,7 +1038,7 @@ }, "capacityConfiguration":{ "shape":"CapacityConfiguration", - "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

    " + "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, and number of instances.

    " }, "releaseLabel":{ "shape":"ReleaseLabel", @@ -1249,7 +1251,7 @@ }, "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " }, "environmentId":{ "shape":"IdType", @@ -1797,7 +1799,7 @@ }, "capacityConfiguration":{ "shape":"CapacityConfiguration", - "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, number of instances, and the port used while establishing a connection.

    " + "documentation":"

    A structure for the metadata of a cluster. It includes information like the CPUs needed, memory of instances, and number of instances.

    " }, "releaseLabel":{ "shape":"ReleaseLabel", @@ -1855,7 +1857,7 @@ "members":{ "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    ", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    ", "location":"querystring", "locationName":"userArn" }, @@ -2060,7 +2062,7 @@ }, "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " }, "environmentId":{ "shape":"IdType", @@ -2076,6 +2078,25 @@ "type":"string", "enum":["IP_V4"] }, + "IcmpTypeCode":{ + "type":"structure", + "required":[ + "type", + "code" + ], + "members":{ + "type":{ + "shape":"IcmpTypeOrCode", + "documentation":"

    The ICMP type. A value of -1 means all types.

    " + }, + "code":{ + "shape":"IcmpTypeOrCode", + "documentation":"

    The ICMP code. A value of -1 means all codes for the specified ICMP type.

    " + } + }, + "documentation":"

    Defines the ICMP protocol that consists of the ICMP type and code.

    " + }, + "IcmpTypeOrCode":{"type":"integer"}, "IdType":{ "type":"string", "max":26, @@ -2224,7 +2245,7 @@ }, "azMode":{ "shape":"KxAzMode", - "documentation":"

    The number of availability zones assigned per cluster. This can be one of the following

    • SINGLE – Assigns one availability zone per cluster.

    • MULTI – Assigns all the availability zones per cluster.

    " + "documentation":"

    The number of availability zones assigned per cluster. This can be one of the following:

    • SINGLE – Assigns one availability zone per cluster.

    • MULTI – Assigns all the availability zones per cluster.

    " }, "availabilityZoneId":{ "shape":"AvailabilityZoneId", @@ -2313,7 +2334,7 @@ "type":"string", "max":50, "min":1, - "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_:.]*" + "pattern":"^[a-zA-Z0-9_:./]+$" }, "KxCommandLineArguments":{ "type":"list", @@ -2386,6 +2407,24 @@ "type":"list", "member":{"shape":"KxDatabaseListEntry"} }, + "KxDeploymentConfiguration":{ + "type":"structure", + "required":["deploymentStrategy"], + "members":{ + "deploymentStrategy":{ + "shape":"KxDeploymentStrategy", + "documentation":"

    The type of deployment that you want on a cluster.

    • ROLLING – This option loads the updated database by stopping the existing q process and starting a new q process with updated configuration.

    • NO_RESTART – This option loads the updated database on the running q process without stopping it. This option is quicker as it reduces the turnaround time to update a kdb database changeset configuration on a cluster.

    " + } + }, + "documentation":"

    The configuration that allows you to choose how you want to update the databases on a cluster. Depending on the option you choose, you can reduce the time it takes to update the database changesets onto a cluster.

    " + }, + "KxDeploymentStrategy":{ + "type":"string", + "enum":[ + "NO_RESTART", + "ROLLING" + ] + }, "KxEnvironment":{ "type":"structure", "members":{ @@ -2511,7 +2550,7 @@ }, "size":{ "shape":"KxSavedownStorageSize", - "documentation":"

    The size of temporary storage in bytes.

    " + "documentation":"

    The size of temporary storage in gibibytes.

    " } }, "documentation":"

    The size and type of temporary storage that is used to hold data during the savedown process. All the data written to this storage space is lost when the cluster node is restarted.

    " @@ -2530,7 +2569,7 @@ "members":{ "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " }, "userName":{ "shape":"KxUserNameString", @@ -2877,16 +2916,57 @@ "min":1, "pattern":"^[a-zA-Z0-9]{1,50}$" }, + "NetworkACLConfiguration":{ + "type":"list", + "member":{"shape":"NetworkACLEntry"}, + "max":100, + "min":1 + }, + "NetworkACLEntry":{ + "type":"structure", + "required":[ + "ruleNumber", + "protocol", + "ruleAction", + "cidrBlock" + ], + "members":{ + "ruleNumber":{ + "shape":"RuleNumber", + "documentation":"

    The rule number for the entry. For example 100. All the network ACL entries are processed in ascending order by rule number.

    " + }, + "protocol":{ + "shape":"Protocol", + "documentation":"

    The protocol number. A value of -1 means all the protocols.

    " + }, + "ruleAction":{ + "shape":"RuleAction", + "documentation":"

    Indicates whether to allow or deny the traffic that matches the rule.

    " + }, + "portRange":{ + "shape":"PortRange", + "documentation":"

    The range of ports the rule applies to.

    " + }, + "icmpTypeCode":{ + "shape":"IcmpTypeCode", + "documentation":"

    Defines the ICMP protocol that consists of the ICMP type and code.

    " + }, + "cidrBlock":{ + "shape":"ValidCIDRBlock", + "documentation":"

    The IPv4 network range to allow or deny, in CIDR notation. For example, 172.16.0.0/24. We modify the specified CIDR block to its canonical form. For example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

    " + } + }, + "documentation":"

    The network access control list (ACL) is an optional layer of security for your VPC that acts as a firewall for controlling traffic in and out of one or more subnets. The entry is a set of numbered ingress and egress rules that determine whether a packet should be allowed in or out of a subnet associated with the ACL. We process the entries in the ACL according to the rule numbers, in ascending order.

    " + }, "NodeCount":{ "type":"integer", - "max":5, "min":1 }, "NodeType":{ "type":"string", "max":32, "min":1, - "pattern":"^[a-zA-Z0-9._]+" + "pattern":"^[a-zA-Z0-9._]+$" }, "PaginationToken":{ "type":"string", @@ -2894,11 +2974,40 @@ "min":1, "pattern":".*" }, + "Port":{ + "type":"integer", + "max":65535, + "min":0 + }, + "PortRange":{ + "type":"structure", + "required":[ + "from", + "to" + ], + "members":{ + "from":{ + "shape":"Port", + "documentation":"

    The first port in the range.

    " + }, + "to":{ + "shape":"Port", + "documentation":"

    The last port in the range.

    " + } + }, + "documentation":"

    The range of ports the rule applies to.

    " + }, + "Protocol":{ + "type":"string", + "max":5, + "min":1, + "pattern":"^-1|[0-9]+$" + }, "ReleaseLabel":{ "type":"string", "max":16, "min":1, - "pattern":"^[a-zA-Z0-9._-]+" + "pattern":"^[a-zA-Z0-9._-]+$" }, "ResourceAlreadyExistsException":{ "type":"structure", @@ -2929,6 +3038,18 @@ "min":20, "pattern":"^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" }, + "RuleAction":{ + "type":"string", + "enum":[ + "allow", + "deny" + ] + }, + "RuleNumber":{ + "type":"integer", + "max":32766, + "min":1 + }, "S3Bucket":{ "type":"string", "max":255, @@ -3075,6 +3196,7 @@ "ThrottlingException":{ "type":"structure", "members":{ + "message":{"shape":"errorMessage"} }, "documentation":"

    The request was denied due to request throttling.

    ", "error":{"httpStatusCode":429}, @@ -3095,6 +3217,10 @@ "routableCIDRSpace":{ "shape":"ValidCIDRSpace", "documentation":"

    The routing CIDR on behalf of kdb environment. It could be any \"/26 range in the 100.64.0.0 CIDR space. After providing, it will be added to the customer's transit gateway routing table so that the traffics could be routed to kdb network.

    " + }, + "attachmentNetworkAclConfiguration":{ + "shape":"NetworkACLConfiguration", + "documentation":"

    The rules that define how you manage the outbound traffic from kdb network to your internal network.

    " } }, "documentation":"

    The structure of the transit gateway and network configuration that is used to connect the kdb environment to an internal network.

    " @@ -3186,11 +3312,16 @@ }, "clientToken":{ "shape":"ClientTokenString", - "documentation":"

    A token that ensures idempotency. This token expires in 10 minutes.

    " + "documentation":"

    A token that ensures idempotency. This token expires in 10 minutes.

    ", + "idempotencyToken":true }, "databases":{ "shape":"KxDatabaseConfigurations", "documentation":"

    The structure of databases mounted on the cluster.

    " + }, + "deploymentConfiguration":{ + "shape":"KxDeploymentConfiguration", + "documentation":"

    The configuration that allows you to choose how you want to update the databases on a cluster.

    " } } }, @@ -3470,7 +3601,7 @@ }, "userArn":{ "shape":"KxUserArn", - "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " + "documentation":"

    The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

    " }, "environmentId":{ "shape":"IdType", @@ -3482,10 +3613,13 @@ } } }, - "ValidCIDRSpace":{ + "ValidCIDRBlock":{ "type":"string", - "pattern":"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/26$" + "max":18, + "min":1, + "pattern":"^(?:\\d{1,3}\\.){3}\\d{1,3}(?:\\/(?:3[0-2]|[12]\\d|\\d))$" }, + "ValidCIDRSpace":{"type":"string"}, "ValidHostname":{ "type":"string", "max":255, diff --git a/services/finspacedata/pom.xml b/services/finspacedata/pom.xml index 28fa3c4089c..7287d16523a 100644 --- a/services/finspacedata/pom.xml +++ b/services/finspacedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT finspacedata AWS Java SDK :: Services :: Finspace Data diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index 9fc57ac1a88..ccb86431d13 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/firehose/src/it/java/software/amazon/awssdk/services/firehose/ServiceIntegrationTest.java b/services/firehose/src/it/java/software/amazon/awssdk/services/firehose/ServiceIntegrationTest.java deleted file mode 100644 index 95277110d05..00000000000 --- a/services/firehose/src/it/java/software/amazon/awssdk/services/firehose/ServiceIntegrationTest.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.awssdk.services.firehose; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; -import static software.amazon.awssdk.testutils.SdkAsserts.assertNotEmpty; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.List; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider; -import software.amazon.awssdk.awscore.exception.AwsServiceException; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.firehose.model.CreateDeliveryStreamRequest; -import software.amazon.awssdk.services.firehose.model.ListDeliveryStreamsRequest; -import software.amazon.awssdk.services.firehose.model.ListDeliveryStreamsResponse; -import software.amazon.awssdk.services.firehose.model.PutRecordBatchRequest; -import software.amazon.awssdk.services.firehose.model.PutRecordBatchResponseEntry; -import software.amazon.awssdk.services.firehose.model.PutRecordRequest; -import software.amazon.awssdk.services.firehose.model.Record; -import software.amazon.awssdk.services.firehose.model.S3DestinationConfiguration; -import software.amazon.awssdk.testutils.service.AwsTestBase; - - -public class ServiceIntegrationTest extends AwsTestBase { - - private static final String DEVLIVERY_STREAM_NAME = "java-sdk-delivery-stream-" - + System.currentTimeMillis(); - private static final String FAKE_S3_BUCKET_ARN = "arn:aws:s3:::fake-s3-bucket-arn"; - private static final String FAKE_IAM_ROLE_ARN = "arn:aws:iam:::fake-iam-role-arn"; - - private static FirehoseClient firehose; - - - @BeforeClass - public static void setup() throws FileNotFoundException, IOException { - // setUpCredentials(); - // firehose = new 
AmazonKinesisFirehoseClient(credentials); - // s3 = new AmazonS3Client(credentials); - - // TODO: firehose can't whitelist our shared account at this point, so - // for now we are using the test account provided by the firehose team - firehose = FirehoseClient.builder().credentialsProvider(CREDENTIALS_PROVIDER_CHAIN).build(); - } - - @AfterClass - public static void tearDown() { - // firehose.deleteDeliveryStream(new DeleteDeliveryStreamRequest() - // .withDeliveryStreamName(DEVLIVERY_STREAM_NAME)); - } - - // @Test - // Nope, can't make it work without full access to S3 and IAM - public void testOperations() { - - // create delivery stream - CreateDeliveryStreamRequest request = - CreateDeliveryStreamRequest.builder() - .deliveryStreamName(DEVLIVERY_STREAM_NAME) - .s3DestinationConfiguration(S3DestinationConfiguration.builder() - .bucketARN(FAKE_S3_BUCKET_ARN) - .roleARN(FAKE_IAM_ROLE_ARN) - .build()) - .build(); - firehose.createDeliveryStream(request); - - // put record - String recordId = firehose.putRecord(PutRecordRequest.builder() - .deliveryStreamName(DEVLIVERY_STREAM_NAME) - .record(Record.builder() - .data(SdkBytes.fromByteArray(new byte[] {0, 1, 2})) - .build()) - .build() - ).recordId(); - assertNotEmpty(recordId); - - // put record batch - List entries = firehose.putRecordBatch( - PutRecordBatchRequest.builder() - .deliveryStreamName(DEVLIVERY_STREAM_NAME) - .records(Record.builder().data(SdkBytes.fromByteArray(new byte[] {0})).build(), - Record.builder().data(SdkBytes.fromByteArray(new byte[] {1})).build()) - .build() - ).requestResponses(); - assertEquals(2, entries.size()); - for (PutRecordBatchResponseEntry entry : entries) { - if (entry.errorCode() == null) { - assertNotEmpty(entry.recordId()); - } else { - assertNotEmpty(entry.errorMessage()); - } - } - } - - @Test - public void testListDeliveryStreams() { - ListDeliveryStreamsResponse result = firehose - .listDeliveryStreams(ListDeliveryStreamsRequest.builder().build()); - 
assertNotNull(result.deliveryStreamNames()); - assertNotNull(result.hasMoreDeliveryStreams()); - } - - @Test - public void testCreateDeliveryStream_InvalidParameter() { - try { - firehose.createDeliveryStream(CreateDeliveryStreamRequest.builder().build()); - fail("ValidationException is expected."); - } catch (AwsServiceException exception) { - assertEquals("ValidationException", exception.awsErrorDetails().errorCode()); - assertNotEmpty(exception.awsErrorDetails().errorMessage()); - } - } - -} diff --git a/services/fis/pom.xml b/services/fis/pom.xml index 7d054d67e15..59cb712f2cb 100644 --- a/services/fis/pom.xml +++ b/services/fis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT fis AWS Java SDK :: Services :: Fis diff --git a/services/fms/pom.xml b/services/fms/pom.xml index a6860a565ab..0ec52e0b503 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/fms/src/main/resources/codegen-resources/customization.config b/services/fms/src/main/resources/codegen-resources/customization.config index f19e48d4287..f5fe1526e9c 100644 --- a/services/fms/src/main/resources/codegen-resources/customization.config +++ b/services/fms/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "getAdminAccount", "getNotificationChannel", "listMemberAccounts", diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index b2af7ebd8da..e8251aa5b6b 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 206ab67690b..6f5bbc537d6 100644 --- 
a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml b/services/frauddetector/pom.xml index 78c54254c90..1c89baea7d1 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index eb97d843d2d..484b209ccda 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json b/services/fsx/src/main/resources/codegen-resources/service-2.json index 131e139cb34..18d5f518af6 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ -107,7 +107,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

    CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

    ", + "documentation":"

    Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported on all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

    CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

    ", "idempotent":true }, "CreateDataRepositoryTask":{ @@ -127,7 +127,7 @@ {"shape":"InternalServerError"}, {"shape":"DataRepositoryTaskExecuting"} ], - "documentation":"

    Creates an Amazon FSx for Lustre data repository task. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system.

    You use import and export data repository tasks to perform bulk operations between your FSx for Lustre file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository.

    You use release data repository tasks to release data from your file system for files that are archived to S3. The metadata of released files remains on the file system so users or applications can still access released files by reading the files again, which will restore data from Amazon S3 to the FSx for Lustre file system.

    To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

    ", + "documentation":"

    Creates an Amazon FSx for Lustre data repository task. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system.

    You use import and export data repository tasks to perform bulk operations between your FSx for Lustre file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository.

    You use release data repository tasks to release data from your file system for files that are exported to S3. The metadata of released files remains on the file system so users or applications can still access released files by reading the files again, which will restore data from Amazon S3 to the FSx for Lustre file system.

    To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

    ", "idempotent":true }, "CreateFileCache":{ @@ -304,7 +304,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    ", + "documentation":"

    Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported on all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    ", "idempotent":true }, "DeleteFileCache":{ @@ -340,7 +340,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.

    To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleFileSystem operation.

    By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.

    The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error.

    If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).

    The data in a deleted file system is also deleted and can't be recovered by any means.

    ", + "documentation":"

    Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.

    To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleteFileSystem operation.

    By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.

    To delete an Amazon FSx for Lustre file system, first unmount it from every connected Amazon EC2 instance, then provide a FileSystemId value to the DeleteFileSystem operation. By default, Amazon FSx will not take a final backup when the DeleteFileSystem operation is invoked. On file systems not linked to an Amazon S3 bucket, set SkipFinalBackup to false to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked file systems. To ensure all of your data is written back to S3 before deleting your file system, you can either monitor for the AgeOfOldestQueuedMessage metric to be zero (if using automatic export) or you can run an export data repository task. If you have automatic export enabled and want to use an export data repository task, you have to disable automatic export before executing the export data repository task.

    The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error.

    If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).

    The data in a deleted file system is also deleted and can't be recovered by any means.

    ", "idempotent":true }, "DeleteSnapshot":{ @@ -424,7 +424,7 @@ {"shape":"InvalidDataRepositoryType"}, {"shape":"InternalServerError"} ], - "documentation":"

    Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

    When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

    ", + "documentation":"

    Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

    When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

    ", "idempotent":true }, "DescribeDataRepositoryTasks":{ @@ -651,7 +651,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    ", + "documentation":"

    Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported on all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    ", "idempotent":true }, "UpdateFileCache":{ @@ -692,7 +692,7 @@ {"shape":"MissingFileSystemConfiguration"}, {"shape":"ServiceLimitExceeded"} ], - "documentation":"

    Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

    For FSx for Windows File Server file systems, you can update the following properties:

    • AuditLogConfiguration

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • SelfManagedActiveDirectoryConfiguration

    • StorageCapacity

    • StorageType

    • ThroughputCapacity

    • DiskIopsConfiguration

    • WeeklyMaintenanceStartTime

    For FSx for Lustre file systems, you can update the following properties:

    • AutoImportPolicy

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DataCompressionType

    • LogConfiguration

    • LustreRootSquashConfiguration

    • StorageCapacity

    • WeeklyMaintenanceStartTime

    For FSx for ONTAP file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • FsxAdminPassword

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    For FSx for OpenZFS file systems, you can update the following properties:

    • AutomaticBackupRetentionDays

    • CopyTagsToBackups

    • CopyTagsToVolumes

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    " + "documentation":"

    Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

    For FSx for Windows File Server file systems, you can update the following properties:

    • AuditLogConfiguration

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • SelfManagedActiveDirectoryConfiguration

    • StorageCapacity

    • StorageType

    • ThroughputCapacity

    • DiskIopsConfiguration

    • WeeklyMaintenanceStartTime

    For FSx for Lustre file systems, you can update the following properties:

    • AutoImportPolicy

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DataCompressionType

    • LogConfiguration

    • LustreRootSquashConfiguration

    • StorageCapacity

    • WeeklyMaintenanceStartTime

    For FSx for ONTAP file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • FsxAdminPassword

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    For FSx for OpenZFS file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • CopyTagsToBackups

    • CopyTagsToVolumes

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    " }, "UpdateSnapshot":{ "name":"UpdateSnapshot", @@ -1355,11 +1355,11 @@ "members":{ "Type":{ "shape":"DataRepositoryTaskType", - "documentation":"

    Specifies the type of data repository task to create.

    • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

    • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

    • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that are archived and that meet your specified release criteria.

    • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

    " + "documentation":"

    Specifies the type of data repository task to create.

    • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

    • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

    • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that have been exported to a linked S3 bucket and that meet your specified release criteria.

    • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

    " }, "Paths":{ "shape":"DataRepositoryTaskPaths", - "documentation":"

    A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all archived files that meet the last accessed time criteria (for release tasks).

    • For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1.

    • For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional).

    • For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release archived files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all archived files in the file system, specify a forward slash (/) as the path.

      A file must also meet the last accessed time criteria specified in for the file to be released.

    " + "documentation":"

    A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all exported files that meet the last accessed time criteria (for release tasks).

    • For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1.

    • For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional).

    • For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release exported files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all exported files in the file system, specify a forward slash (/) as the path.

      A file must also meet the last accessed time criteria specified in DurationSinceLastAccess for the file to be released.

    " }, "FileSystemId":{"shape":"FileSystemId"}, "Report":{ @@ -1515,7 +1515,7 @@ "KmsKeyId":{"shape":"KmsKeyId"}, "FileSystemTypeVersion":{ "shape":"FileSystemTypeVersion", - "documentation":"

    Sets the version for the Amazon FSx for Lustre file system that you're creating from a backup. Valid values are 2.10 and 2.12.

    You don't need to specify FileSystemTypeVersion because it will be applied using the backup's FileSystemTypeVersion setting. If you choose to specify FileSystemTypeVersion when creating from backup, the value must match the backup's FileSystemTypeVersion setting.

    " + "documentation":"

    Sets the version for the Amazon FSx for Lustre file system that you're creating from a backup. Valid values are 2.10, 2.12, and 2.15.

    You don't need to specify FileSystemTypeVersion because it will be applied using the backup's FileSystemTypeVersion setting. If you choose to specify FileSystemTypeVersion when creating from backup, the value must match the backup's FileSystemTypeVersion setting.

    " }, "OpenZFSConfiguration":{ "shape":"CreateFileSystemOpenZFSConfiguration", @@ -1628,7 +1628,7 @@ }, "RouteTableIds":{ "shape":"RouteTableIds", - "documentation":"

    (Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

    " + "documentation":"

    (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

    " }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", @@ -1657,11 +1657,11 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "DeploymentType":{ "shape":"OpenZFSDeploymentType", - "documentation":"

    Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:

    • MULTI_AZ_1- Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). Multi_AZ_1 is available in the following Amazon Web Services Regions:

    • SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available.

    • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions.

    For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

    " + "documentation":"

    Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region. Valid values are the following:

    • MULTI_AZ_1- Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). Multi_AZ_1 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions.

    • SINGLE_AZ_1- Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available.

    • SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions.

    For more information, see Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.

    " }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows:

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

    • For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

    You pay for additional throughput capacity that you provision.

    " + "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows:

    • For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

    You pay for additional throughput capacity that you provision.

    " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"}, @@ -1679,7 +1679,7 @@ }, "RouteTableIds":{ "shape":"RouteTableIds", - "documentation":"

    (Multi-AZ only) Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

    " + "documentation":"

    (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.

    " } }, "documentation":"

    The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.

    " @@ -1730,7 +1730,7 @@ "OntapConfiguration":{"shape":"CreateFileSystemOntapConfiguration"}, "FileSystemTypeVersion":{ "shape":"FileSystemTypeVersion", - "documentation":"

    (Optional) For FSx for Lustre file systems, sets the Lustre version for the file system that you're creating. Valid values are 2.10 and 2.12:

    • 2.10 is supported by the Scratch and Persistent_1 Lustre deployment types.

    • 2.12 is supported by all Lustre deployment types. 2.12 is required when setting FSx for Lustre DeploymentType to PERSISTENT_2.

    Default value = 2.10, except when DeploymentType is set to PERSISTENT_2, then the default is 2.12.

    If you set FileSystemTypeVersion to 2.10 for a PERSISTENT_2 Lustre deployment type, the CreateFileSystem operation fails.

    " + "documentation":"

    (Optional) For FSx for Lustre file systems, sets the Lustre version for the file system that you're creating. Valid values are 2.10, 2.12, and 2.15:

    • 2.10 is supported by the Scratch and Persistent_1 Lustre deployment types.

    • 2.12 and 2.15 are supported by all Lustre deployment types. 2.12 or 2.15 is required when setting FSx for Lustre DeploymentType to PERSISTENT_2.

    Default value = 2.10, except when DeploymentType is set to PERSISTENT_2, then the default is 2.12.

    If you set FileSystemTypeVersion to 2.10 for a PERSISTENT_2 Lustre deployment type, the CreateFileSystem operation fails.

    " }, "OpenZFSConfiguration":{ "shape":"CreateFileSystemOpenZFSConfiguration", @@ -2173,7 +2173,7 @@ "documentation":"

    The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.

    " } }, - "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.

    " + "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    " }, "DataRepositoryAssociationId":{ "type":"string", @@ -2263,7 +2263,7 @@ }, "Type":{ "shape":"DataRepositoryTaskType", - "documentation":"

    The type of data repository task.

    • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

    • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

    • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that are archived and that meet your specified release criteria.

    • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

    " + "documentation":"

    The type of data repository task.

    • EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.

    • IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.

    • RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that have been exported to a linked S3 bucket and that meet your specified release criteria.

    • AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.

    " }, "CreationTime":{"shape":"CreationTime"}, "StartTime":{ @@ -2306,7 +2306,7 @@ "documentation":"

    The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system.

    " } }, - "documentation":"

    A description of the data repository task.

    • You use import and export data repository tasks to perform bulk transfer operations between an Amazon FSx for Lustre file system and a linked data repository.

    • You use release data repository tasks to release archived files from your Amazon FSx for Lustre file system.

    • An Amazon File Cache resource uses a task to automatically release files from the cache.

    To learn more about data repository tasks, see Data Repository Tasks.

    " + "documentation":"

    A description of the data repository task.

    • You use import and export data repository tasks to perform bulk transfer operations between an Amazon FSx for Lustre file system and a linked data repository.

    • You use release data repository tasks to release files that have been exported to a linked S3 bucket from your Amazon FSx for Lustre file system.

    • An Amazon File Cache resource uses a task to automatically release files from the cache.

    To learn more about data repository tasks, see Data Repository Tasks.

    " }, "DataRepositoryTaskEnded":{ "type":"structure", @@ -3146,10 +3146,10 @@ }, "Value":{ "shape":"Value", - "documentation":"

    An integer that represents the minimum amount of time (in days) since a file was last accessed in the file system. Only archived files with a MAX(atime, ctime, mtime) timestamp that is more than this amount of time in the past (relative to the task create time) will be released. The default of Value is 0. This is a required parameter.

    If an archived file meets the last accessed time criteria, its file or directory path must also be specified in the Paths parameter of the operation in order for the file to be released.

    " + "documentation":"

    An integer that represents the minimum amount of time (in days) since a file was last accessed in the file system. Only exported files with a MAX(atime, ctime, mtime) timestamp that is more than this amount of time in the past (relative to the task create time) will be released. The default of Value is 0. This is a required parameter.

    If an exported file meets the last accessed time criteria, its file or directory path must also be specified in the Paths parameter of the operation in order for the file to be released.

    " } }, - "documentation":"

    Defines the minimum amount of time since last access for a file to be eligible for release. Only archived files that were last accessed or modified before this point-in-time are eligible to be released from the Amazon FSx for Lustre file system.

    " + "documentation":"

    Defines the minimum amount of time since last access for a file to be eligible for release. Only files that have been exported to S3 and that were last accessed or modified before this point-in-time are eligible to be released from the Amazon FSx for Lustre file system.

    " }, "EndTime":{"type":"timestamp"}, "ErrorMessage":{ @@ -3487,7 +3487,7 @@ }, "FileSystemTypeVersion":{ "shape":"FileSystemTypeVersion", - "documentation":"

    The Lustre version of the Amazon FSx for Lustre file system, either 2.10 or 2.12.

    " + "documentation":"

    The Lustre version of the Amazon FSx for Lustre file system, which is 2.10, 2.12, or 2.15.

    " }, "OpenZFSConfiguration":{ "shape":"OpenZFSFileSystemConfiguration", @@ -3903,7 +3903,7 @@ "DataRepositoryConfiguration":{"shape":"DataRepositoryConfiguration"}, "DeploymentType":{ "shape":"LustreDeploymentType", - "documentation":"

    The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

    SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    The PERSISTENT_1 and PERSISTENT_2 deployment type is used for longer-term storage and workloads and encryption of data in transit. PERSISTENT_2 is built on Lustre v2.12 and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see FSx for Lustre deployment options.

    The default is SCRATCH_1.

    " + "documentation":"

    The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

    SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    The PERSISTENT_1 and PERSISTENT_2 deployment type is used for longer-term storage and workloads and encryption of data in transit. PERSISTENT_2 offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see FSx for Lustre deployment options.

    The default is SCRATCH_1.

    " }, "PerUnitStorageThroughput":{ "shape":"PerUnitStorageThroughput", @@ -4539,10 +4539,10 @@ "members":{ "DurationSinceLastAccess":{ "shape":"DurationSinceLastAccess", - "documentation":"

    Defines the point-in-time since an archived file was last accessed, in order for that file to be eligible for release. Only files that were last accessed before this point-in-time are eligible to be released from the file system.

    " + "documentation":"

    Defines the point-in-time since an exported file was last accessed, in order for that file to be eligible for release. Only files that were last accessed before this point-in-time are eligible to be released from the file system.

    " } }, - "documentation":"

    The configuration that specifies a minimum amount of time since last access for an archived file to be eligible for release from an Amazon FSx for Lustre file system. Only files that were last accessed before this point-in-time can be released. For example, if you specify a last accessed time criteria of 9 days, only files that were last accessed 9.00001 or more days ago can be released.

    Only file data that has been archived can be released. Files that have not yet been archived, such as new or changed files that have not been exported, are not eligible for release. When files are released, their metadata stays on the file system, so they can still be accessed later. Users and applications can access a released file by reading the file again, which restores data from Amazon S3 to the FSx for Lustre file system.

    If a file meets the last accessed time criteria, its file or directory path must also be specified with the Paths parameter of the operation in order for the file to be released.

    " + "documentation":"

    The configuration that specifies a minimum amount of time since last access for an exported file to be eligible for release from an Amazon FSx for Lustre file system. Only files that were last accessed before this point-in-time can be released. For example, if you specify a last accessed time criteria of 9 days, only files that were last accessed 9.00001 or more days ago can be released.

    Only file data that has been exported to S3 can be released. Files that have not yet been exported to S3, such as new or changed files that have not been exported, are not eligible for release. When files are released, their metadata stays on the file system, so they can still be accessed later. Users and applications can access a released file by reading the file again, which restores data from Amazon S3 to the FSx for Lustre file system.

    If a file meets the last accessed time criteria, its file or directory path must also be specified with the Paths parameter of the operation in order for the file to be released.

    " }, "ReleaseFileSystemNfsV3LocksRequest":{ "type":"structure", @@ -5568,7 +5568,7 @@ "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

    The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second
 (MB/s). Valid values depend on the DeploymentType you choose, as follows:

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

    • For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.

    " + "documentation":"

    The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second
 (MB/s). Valid values depend on the DeploymentType you choose, as follows:

    • For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.

    " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"}, diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index 551541fdcb0..5a076d3e1e7 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/gamelift/src/main/resources/codegen-resources/customization.config b/services/gamelift/src/main/resources/codegen-resources/customization.config index dae30876df4..b7c812e1368 100644 --- a/services/gamelift/src/main/resources/codegen-resources/customization.config +++ b/services/gamelift/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ // with STS's naming of a similar shape. "AwsCredentials" : "Credentials" }, - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "createBuild", "searchGameSessions", "describePlayerSessions", diff --git a/services/gamelift/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/gamelift/src/main/resources/codegen-resources/endpoint-rule-set.json index d845bf7ea21..4b5e0d83c89 100644 --- a/services/gamelift/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/gamelift/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom 
endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://gamelift-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + 
"url": "https://gamelift-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://gamelift-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://gamelift-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - 
} - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://gamelift.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://gamelift.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://gamelift.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://gamelift.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/gamelift/src/main/resources/codegen-resources/service-2.json b/services/gamelift/src/main/resources/codegen-resources/service-2.json index c3f2149ec02..814c97017ed 100644 --- a/services/gamelift/src/main/resources/codegen-resources/service-2.json +++ b/services/gamelift/src/main/resources/codegen-resources/service-2.json @@ -26,7 +26,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

    When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.

    To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING, where a new game session is created for the match.

    If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where one or more players rejected the match or failed to respond, the ticket status is set to CANCELLED, and processing is terminated. For tickets where players have accepted or not yet responded, the ticket status is returned to SEARCHING to find a new match. A new matchmaking request for these players can be submitted as needed.

    Learn more

    Add FlexMatch to a game client

    FlexMatch events (reference)

    " + "documentation":"

    Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

    When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in each ticket. Calls to this action are only valid for tickets that are in this status; calls for tickets not in this status result in an error.

    To register acceptance, specify the ticket ID, one or more players, and an acceptance response. When all players have accepted, Amazon GameLift advances the matchmaking tickets to status PLACING, and attempts to create a new game session for the match.

    If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. Each matchmaking ticket in the failed match is handled as follows:

    • If the ticket has one or more players who rejected the match or failed to respond, the ticket status is set CANCELLED and processing is terminated.

    • If all players in the ticket accepted the match, the ticket status is returned to SEARCHING to find a new match.

    Learn more

    Add FlexMatch to a game client

    FlexMatch events (reference)

    " }, "ClaimGameServer":{ "name":"ClaimGameServer", @@ -159,7 +159,7 @@ {"shape":"IdempotentParameterMismatchException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement , which uses FleetIQ algorithms and queues to optimize the placement process.

    When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The fleet must be in ACTIVE status before a game session can be created in it.

    This operation can be used in the following ways:

    • To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration.

    • To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration.

    If successful, a workflow is initiated to start a new game session. A GameSession object is returned containing the game session configuration and status. When the status is ACTIVE, game session connection information is provided and player sessions can be created for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

    Game session logs are retained for all active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

    Available in Amazon GameLift Local.

    Learn more

    Start a game session

    All APIs by task

    " + "documentation":"

    Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement , which uses the FleetIQ algorithm and queues to optimize the placement process.

    When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status.

    You can use this operation in the following ways:

    • To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration.

    • To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration.

    • To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location.

    If successful, Amazon GameLift initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status. When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

    Amazon GameLift retains logs for active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

    Available in Amazon GameLift Local.

    Learn more

    Start a game session

    All APIs by task

    " }, "CreateGameSessionQueue":{ "name":"CreateGameSessionQueue", @@ -545,7 +545,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Removes a compute resource from the specified fleet. Deregister your compute resources before you delete the compute.

    " + "documentation":"

    Removes a compute resource from an Amazon GameLift Anywhere fleet. Deregistered computes can no longer host game sessions through Amazon GameLift.

    " }, "DeregisterGameServer":{ "name":"DeregisterGameServer", @@ -608,7 +608,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Retrieves properties for a compute resource. To request a compute resource specify the fleet ID and compute name. If successful, Amazon GameLift returns an object containing the build properties.

    " + "documentation":"

    Retrieves properties for a compute resource in an Amazon GameLift fleet. Call ListCompute to get a list of compute resources in a fleet. You can request information for computes in either managed EC2 fleets or Anywhere fleets.

    To request compute properties, specify the compute name and fleet ID.

    If successful, this operation returns details for the requested compute resource. For managed EC2 fleets, this operation returns the fleet's EC2 instances. For Anywhere fleets, this operation returns the fleet's registered computes.

    " }, "DescribeEC2InstanceLimits":{ "name":"DescribeEC2InstanceLimits", @@ -889,7 +889,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves information about a fleet's instances, including instance IDs, connection data, and status.

    This operation can be used in the following ways:

    • To get information on all instances that are deployed to a fleet's home Region, provide the fleet ID.

    • To get information on all instances that are deployed to a fleet's remote location, provide the fleet ID and location name.

    • To get information on a specific instance in a fleet, provide the fleet ID and instance ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, an Instance object is returned for each requested instance. Instances are not returned in any particular order.

    Learn more

    Remotely Access Fleet Instances

    Debug Fleet Issues

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. As an alternative, use the operations ListCompute and DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets.

    You can call this operation in the following ways:

    • To get information on all instances in a fleet's home Region, specify the fleet ID.

    • To get information on all instances in a fleet's remote location, specify the fleet ID and location name.

    • To get information on a specific instance in a fleet, specify the fleet ID and instance ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    Related actions

    All APIs by task

    " }, "DescribeMatchmaking":{ "name":"DescribeMatchmaking", @@ -951,7 +951,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

    Retrieves properties for one or more player sessions.

    This action can be used in the following ways:

    • To retrieve a specific player session, provide the player session ID only.

    • To retrieve all player sessions in a game session, provide the game session ID only.

    • To retrieve all player sessions for a specific player, provide a player ID only.

    To request player sessions, specify either a player session ID, game session ID, or player ID. You can filter this request by player session status. Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a PlayerSession object is returned for each session that matches the request.

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves properties for one or more player sessions.

    This action can be used in the following ways:

    • To retrieve a specific player session, provide the player session ID only.

    • To retrieve all player sessions in a game session, provide the game session ID only.

    • To retrieve all player sessions for a specific player, provide a player ID only.

    To request player sessions, specify either a player session ID, game session ID, or player ID. You can filter this request by player session status. If you provide a specific PlayerSessionId or PlayerId, Amazon GameLift ignores the filter criteria. Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a PlayerSession object is returned for each session that matches the request.

    Related actions

    All APIs by task

    " }, "DescribeRuntimeConfiguration":{ "name":"DescribeRuntimeConfiguration", @@ -1047,7 +1047,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or observing activity in real time.

    To remotely access an instance, you need credentials that match the operating system of the instance. For a Windows instance, Amazon GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before using. If you're making this request using the CLI, saving the secret can be handled as part of the GetInstanceAccess request, as shown in one of the examples for this operation.

    To request access to a specific instance, specify the IDs of both the instance and the fleet it belongs to.

    Learn more

    Remotely Access Fleet Instances

    Debug Fleet Issues

    " + "documentation":"

    Requests authorization to remotely connect to a compute resource in an Amazon GameLift fleet. Call this action to connect to an instance in a managed EC2 fleet if the fleet's game build uses Amazon GameLift server SDK 5.x or later. To connect to instances with game builds that use server SDK 4.x or earlier, call GetInstanceAccess.

    To request access to a compute, identify the specific EC2 instance and the fleet it belongs to. You can retrieve instances for a managed EC2 fleet by calling ListCompute.

    If successful, this operation returns a set of temporary Amazon Web Services credentials, including a two-part access key and a session token. Use these credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User Guide.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    " }, "GetComputeAuthToken":{ "name":"GetComputeAuthToken", @@ -1063,7 +1063,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Requests an authentication token from Amazon GameLift. The authentication token is used by your game server to authenticate with Amazon GameLift. Each authentication token has an expiration time. To continue using the compute resource to host your game server, regularly retrieve a new authorization token.

    " + "documentation":"

    Requests an authentication token from Amazon GameLift for a registered compute in an Anywhere fleet. The game servers that are running on the compute use this token to authenticate with the Amazon GameLift service. Each server process must provide a valid authentication token in its call to the Amazon GameLift server SDK action InitSDK().

    Authentication tokens are valid for a limited time span. Use a mechanism to regularly request a fresh authentication token before the current token expires.

    Learn more

    " }, "GetGameSessionLogUrl":{ "name":"GetGameSessionLogUrl", @@ -1095,7 +1095,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Requests remote access to a fleet instance. Remote access is useful for debugging, gathering benchmarking data, or observing activity in real time.

    To remotely access an instance, you need credentials that match the operating system of the instance. For a Windows instance, Amazon GameLift returns a user name and password as strings for use with a Windows Remote Desktop client. For a Linux instance, Amazon GameLift returns a user name and RSA private key, also as strings, for use with an SSH client. The private key must be saved in the proper format to a .pem file before using. If you're making this request using the CLI, saving the secret can be handled as part of the GetInstanceAccess request, as shown in one of the examples for this operation.

    To request access to a specific instance, specify the IDs of both the instance and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling DescribeInstances.

    Learn more

    Remotely Access Fleet Instances

    Debug Fleet Issues

    Related actions

    All APIs by task

    " + "documentation":"

    Requests authorization to remotely connect to an instance in an Amazon GameLift managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call GetComputeAccess.

    To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID.

    If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows:

    • For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client.

    • For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    Related actions

    All APIs by task

    " }, "ListAliases":{ "name":"ListAliases", @@ -1140,7 +1140,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Retrieves all compute resources registered to a fleet in your Amazon Web Services account. You can filter the result set by location.

    " + "documentation":"

    Retrieves the compute resources in an Amazon GameLift fleet. You can request information for either managed EC2 fleets or Anywhere fleets.

    To request a list of computes, specify the fleet ID. You can filter the result set by location. Use the pagination parameters to retrieve results in a set of sequential pages.

    If successful, this operation returns the compute resource for the requested fleet. For managed EC2 fleets, it returns a list of EC2 instances. For Anywhere fleets, it returns a list of registered compute names.

    " }, "ListFleets":{ "name":"ListFleets", @@ -1264,7 +1264,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Registers your compute resources in a fleet you previously created. After you register a compute to your fleet, you can monitor and manage your compute using Amazon GameLift. The operation returns the compute resource containing SDK endpoint you can use to connect your game server to Amazon GameLift.

    Learn more

    " + "documentation":"

    Registers a compute resource to an Amazon GameLift Anywhere fleet. With Anywhere fleets you can incorporate your own computing hardware into an Amazon GameLift game hosting solution.

    To register a compute to a fleet, give the compute a name (must be unique within the fleet) and specify the compute resource's DNS name or IP address. Provide the Anywhere fleet ID and a fleet location to associate with the compute being registered. You can optionally include the path to a TLS certificate on the compute resource.

    If successful, this operation returns the compute details, including an Amazon GameLift SDK endpoint. Game server processes that run on the compute use this endpoint to communicate with the Amazon GameLift service. Each server process includes the SDK endpoint in its call to the Amazon GameLift server SDK action InitSDK().

    Learn more

    " }, "RegisterGameServer":{ "name":"RegisterGameServer", @@ -1616,7 +1616,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Updates information about a registered game server to help Amazon GameLift FleetIQ to track game server availability. This operation is called by a game server process that is running on an instance in a game server group.

    Use this operation to update the following types of game server information. You can make all three types of updates in the same request:

    • To update the game server's utilization status, identify the game server and game server group and specify the current utilization status. Use this status to identify when game servers are currently hosting games and when they are available to be claimed.

    • To report health status, identify the game server and game server group and set health check to HEALTHY. If a game server does not report health status for a certain length of time, the game server is no longer considered healthy. As a result, it will be eventually deregistered from the game server group to avoid affecting utilization metrics. The best practice is to report health every 60 seconds.

    • To change game server metadata, provide updated game server data.

    Once a game server is successfully updated, the relevant statuses and timestamps are updated.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Updates information about a registered game server to help Amazon GameLift FleetIQ track game server availability. This operation is called by a game server process that is running on an instance in a game server group.

    Use this operation to update the following types of game server information. You can make all three types of updates in the same request:

    • To update the game server's utilization status from AVAILABLE (when the game server is available to be claimed) to UTILIZED (when the game server is currently hosting games). Identify the game server and game server group and specify the new utilization status. You can't change the status from UTILIZED to AVAILABLE.

    • To report health status, identify the game server and game server group and set health check to HEALTHY. If a game server does not report health status for a certain length of time, the game server is no longer considered healthy. As a result, it will be eventually deregistered from the game server group to avoid affecting utilization metrics. The best practice is to report health every 60 seconds.

    • To change game server metadata, provide updated game server data.

    Once a game server is successfully updated, the relevant statuses and timestamps are updated.

    Learn more

    Amazon GameLift FleetIQ Guide

    " }, "UpdateGameServerGroup":{ "name":"UpdateGameServerGroup", @@ -1873,18 +1873,18 @@ "members":{ "AccessKeyId":{ "shape":"NonEmptyString", - "documentation":"

    Temporary key allowing access to the Amazon GameLift S3 account.

    " + "documentation":"

    The access key ID that identifies the temporary security credentials.

    " }, "SecretAccessKey":{ "shape":"NonEmptyString", - "documentation":"

    Temporary secret key allowing access to the Amazon GameLift S3 account.

    " + "documentation":"

    The secret access key that can be used to sign requests.

    " }, "SessionToken":{ "shape":"NonEmptyString", - "documentation":"

    Token used to associate a specific build ID with the files uploaded using these credentials.

    " + "documentation":"

    The token that users must pass to the service API to use the temporary credentials.

    " } }, - "documentation":"

    Temporary access credentials used for uploading game build files to Amazon GameLift. They are valid for a limited time. If they expire before you upload your game build, get a new set by calling RequestUploadCredentials.

    ", + "documentation":"

    Amazon Web Services account security credentials that allow interactions with Amazon GameLift resources. The credentials are temporary and valid for a limited time span. You can request fresh credentials at any time.

    Amazon Web Services security credentials consist of three parts: an access key ID, a secret access key, and a session token. You must use all three parts together to authenticate your access requests.

    You need Amazon Web Services credentials for the following tasks:

    • To upload a game server build directly to Amazon GameLift S3 storage using CreateBuild. To get access for this task, call RequestUploadCredentials.

    • To remotely connect to active Amazon GameLift fleet instances. To get remote access, call GetComputeAccess.

    ", "sensitive":true }, "BackfillMode":{ @@ -2042,27 +2042,27 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

    A unique identifier for the fleet that the compute is registered to.

    " + "documentation":"

    A unique identifier for the fleet that the compute belongs to.

    " }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) of the fleet that the compute is registered to.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the fleet that the compute belongs to.

    " }, "ComputeName":{ "shape":"ComputeName", - "documentation":"

    A descriptive label that is associated with the compute resource registered to your fleet.

    " + "documentation":"

    A descriptive label for the compute resource. For instances in a managed EC2 fleet, the compute name is an instance ID.

    " }, "ComputeArn":{ "shape":"ComputeArn", - "documentation":"

    The ARN that is assigned to the compute resource and uniquely identifies it. ARNs are unique across locations.

    " + "documentation":"

    The ARN that is assigned to a compute resource and uniquely identifies it. ARNs are unique across locations. Instances in managed EC2 fleets are not assigned a ComputeARN.

    " }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the compute resource. Amazon GameLift requires the DNS name or IP address to manage your compute resource.

    " + "documentation":"

    The IP address of a compute resource. Amazon GameLift requires a DNS name or IP address for a compute.

    " }, "DnsName":{ "shape":"DnsName", - "documentation":"

    The DNS name of the compute resource. Amazon GameLift requires the DNS name or IP address to manage your compute resource.

    " + "documentation":"

    The DNS name of a compute resource. Amazon GameLift requires a DNS name or IP address for a compute.

    " }, "ComputeStatus":{ "shape":"ComputeStatus", @@ -2078,18 +2078,18 @@ }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

    The type of operating system on your compute resource.

    " + "documentation":"

    The type of operating system on the compute resource.

    " }, "Type":{ "shape":"EC2InstanceType", - "documentation":"

    The compute type that the fleet uses. A fleet can use Anywhere compute resources that you own, or use managed Amazon EC2 instances.

    " + "documentation":"

    The Amazon EC2 instance type that the fleet uses. For registered computes in an Amazon GameLift Anywhere fleet, this property is empty.

    " }, "GameLiftServiceSdkEndpoint":{ "shape":"GameLiftServiceSdkEndpointOutput", - "documentation":"

    The endpoint connection details of the Amazon GameLift SDK endpoint that your game server connects to.

    " + "documentation":"

    The Amazon GameLift SDK endpoint connection for a registered compute resource in an Anywhere fleet. The game servers on the compute use this endpoint to connect to the Amazon GameLift service.

    " } }, - "documentation":"

    Resources used to host your game servers. A compute resource can be managed Amazon GameLift Amazon EC2 instances or your own resources.

    " + "documentation":"

    An Amazon GameLift compute resource for hosting your game servers. A compute can be an EC2 instance in a managed EC2 fleet or a registered compute in an Anywhere fleet.

    " }, "ComputeArn":{ "type":"string", @@ -2469,7 +2469,7 @@ }, "Location":{ "shape":"LocationStringModel", - "documentation":"

    A fleet's remote location to place the new game session in. If this parameter is not set, the new game session is placed in the fleet's home Region. Specify a remote location with an Amazon Web Services Region code such as us-west-2.

    " + "documentation":"

    A fleet's remote location to place the new game session in. If this parameter is not set, the new game session is placed in the fleet's home Region. Specify a remote location with an Amazon Web Services Region code such as us-west-2. When using an Anywhere fleet, this parameter is required and must be set to the Anywhere fleet's custom location.

    " } } }, @@ -3058,11 +3058,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    >A unique identifier for the fleet the compute resource is registered to.

    " + "documentation":"

    A unique identifier for the fleet the compute resource is currently registered to.

    " }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    The name of the compute resource you want to delete.

    " + "documentation":"

    The name of the compute resource to remove from the specified Anywhere fleet.

    " } } }, @@ -3135,11 +3135,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    A unique identifier for the fleet the compute is registered to.

    " + "documentation":"

    A unique identifier for the fleet that the compute is registered to. You can use either the fleet ID or ARN value.

    " }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    A descriptive label that is associated with the compute resource registered to your fleet.

    " + "documentation":"

    The unique identifier of the compute resource to retrieve properties for. For an Anywhere fleet compute, use the registered compute name. For a managed EC2 fleet instance, use the instance ID.

    " } } }, @@ -3148,7 +3148,7 @@ "members":{ "Compute":{ "shape":"Compute", - "documentation":"

    The details of the compute resource you registered to the specified fleet.

    " + "documentation":"

    The set of properties for the requested compute resource.

    " } } }, @@ -3226,7 +3226,7 @@ "members":{ "FleetCapacity":{ "shape":"FleetCapacityList", - "documentation":"

    A collection of objects that contains capacity information for each requested fleet ID. Capacity objects are returned only for fleets that currently exist.

    " + "documentation":"

    A collection of objects that contains capacity information for each requested fleet ID. Capacity objects are returned only for fleets that currently exist. Changes in desired instance value can take up to 1 minute to be reflected.

    " }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -3338,7 +3338,7 @@ "members":{ "FleetCapacity":{ "shape":"FleetCapacity", - "documentation":"

    Resource capacity information for the requested fleet location. Capacity objects are returned only for fleets and locations that currently exist.

    " + "documentation":"

    Resource capacity information for the requested fleet location. Capacity objects are returned only for fleets and locations that currently exist. Changes in desired instance value can take up to 1 minute to be reflected.

    " } } }, @@ -3964,7 +3964,7 @@ "members":{ "DESIRED":{ "shape":"WholeNumber", - "documentation":"

    Ideal number of active instances. GameLift will always try to maintain the desired number of instances. Capacity is scaled up or down by changing the desired instances.

    " + "documentation":"

    Requested number of active instances. Amazon GameLift takes action as needed to maintain the desired number of instances. Capacity is scaled up or down by changing the desired instances. A change in the desired instances value can take up to 1 minute to be reflected when viewing a fleet's capacity settings.

    " }, "MINIMUM":{ "shape":"WholeNumber", @@ -4135,7 +4135,68 @@ "r5d.8xlarge", "r5d.12xlarge", "r5d.16xlarge", - "r5d.24xlarge" + "r5d.24xlarge", + "m6g.medium", + "m6g.large", + "m6g.xlarge", + "m6g.2xlarge", + "m6g.4xlarge", + "m6g.8xlarge", + "m6g.12xlarge", + "m6g.16xlarge", + "c6g.medium", + "c6g.large", + "c6g.xlarge", + "c6g.2xlarge", + "c6g.4xlarge", + "c6g.8xlarge", + "c6g.12xlarge", + "c6g.16xlarge", + "r6g.medium", + "r6g.large", + "r6g.xlarge", + "r6g.2xlarge", + "r6g.4xlarge", + "r6g.8xlarge", + "r6g.12xlarge", + "r6g.16xlarge", + "c6gn.medium", + "c6gn.large", + "c6gn.xlarge", + "c6gn.2xlarge", + "c6gn.4xlarge", + "c6gn.8xlarge", + "c6gn.12xlarge", + "c6gn.16xlarge", + "c7g.medium", + "c7g.large", + "c7g.xlarge", + "c7g.2xlarge", + "c7g.4xlarge", + "c7g.8xlarge", + "c7g.12xlarge", + "c7g.16xlarge", + "r7g.medium", + "r7g.large", + "r7g.xlarge", + "r7g.2xlarge", + "r7g.4xlarge", + "r7g.8xlarge", + "r7g.12xlarge", + "r7g.16xlarge", + "m7g.medium", + "m7g.large", + "m7g.xlarge", + "m7g.2xlarge", + "m7g.4xlarge", + "m7g.8xlarge", + "m7g.12xlarge", + "m7g.16xlarge", + "g5g.xlarge", + "g5g.2xlarge", + "g5g.4xlarge", + "g5g.8xlarge", + "g5g.16xlarge" ] }, "Event":{ @@ -4151,7 +4212,7 @@ }, "EventCode":{ "shape":"EventCode", - "documentation":"

    The type of event being logged.

    Fleet state transition events:

    • FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID.

    • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

    • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift has successfully downloaded the build and is now validating the build files.

    • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. Amazon GameLift has successfully verified the build files and is now running the installation scripts.

    • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift is trying to launch an instance and test the connectivity between the build and the Amazon GameLift Service via the Server SDK.

    • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

    • FLEET_STATE_ERROR -- The Fleet's status changed to ERROR. Describe the fleet event message for more details.

    Fleet creation events (ordered by fleet creation activity):

    • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

    • FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and the GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.

    • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.

    • FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again.

    • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues.

    • FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc.

    • FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. Describe the fleet event message for more details.

    VPC peering events:

    • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account.

    • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

    • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

    Spot instance events:

    • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

    Server process events:

    • SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet.

    • SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected. Check your game session log to see why InitSDK() was not called in time.

    • SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time.

    • SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called.

    • SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by GameLift. Check your game session log to see if the thread became stuck processing a synchronous task for too long.

    • SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly after OnProcessTerminate() was sent within the time expected. Check your game session log to see why termination took longer than expected.

    • SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected after calling ProcessEnding(). Check your game session log to see why termination took longer than expected.

    Game session events:

    • GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected.

    Other fleet events:

    • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

    • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

    • FLEET_DELETED -- A request to delete a fleet was initiated.

    • GENERIC_EVENT -- An unspecified event has occurred.

    " + "documentation":"

    The type of event being logged.

    Fleet state transition events:

    • FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID.

    • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The compressed build has started downloading to a fleet instance for installation.

    • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift has successfully downloaded the build and is now validating the build files.

    • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. Amazon GameLift has successfully verified the build files and is now running the installation scripts.

    • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift is trying to launch an instance and test the connectivity between the build and the Amazon GameLift Service via the Server SDK.

    • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

    • FLEET_STATE_ERROR -- The Fleet's status changed to ERROR. Describe the fleet event message for more details.

    Fleet creation events (ordered by fleet creation activity):

    • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

    • FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.

    • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.

    • FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again.

    • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues.

    • FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc.

    • FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. Describe the fleet event message for more details.

    VPC peering events:

    • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account.

    • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

    • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

    Spot instance events:

    • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

    • INSTANCE_RECYCLED -- A spot instance was determined to have a high risk of interruption and is scheduled to be recycled once it has no active game sessions.

    Server process events:

    • SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet.

    • SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected. Check your game session log to see why InitSDK() was not called in time.

    • SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time.

    • SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called.

    • SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by GameLift. Check your game session log to see if the thread became stuck processing a synchronous task for too long.

    • SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly after OnProcessTerminate() was sent within the time expected. Check your game session log to see why termination took longer than expected.

    • SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected after calling ProcessEnding(). Check your game session log to see why termination took longer than expected.

    Game session events:

    • GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected.

    Other fleet events:

    • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

    • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

    • FLEET_DELETED -- A request to delete a fleet was initiated.

    • GENERIC_EVENT -- An unspecified event has occurred.

    " }, "Message":{ "shape":"NonEmptyString", @@ -4876,7 +4937,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) associated with the GameLift fleet that this game session is running on.

    " + "documentation":"

    The Amazon Resource Name (ARN) associated with the GameLift fleet that this game session is running on.

    " }, "CreationTime":{ "shape":"Timestamp", @@ -4932,14 +4993,14 @@ }, "MatchmakerData":{ "shape":"MatchmakerData", - "documentation":"

    Information about the matchmaking process that was used to create the game session. It is in JSON syntax, formatted as a string. In addition the matchmaking configuration used, it contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is useful when requesting match backfills, and is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

    " + "documentation":"

    Information about the matchmaking process that resulted in the game session, if matchmaking was used. Data is in JSON syntax, formatted as a string. Information includes the matchmaker ID as well as player attributes and team assignments. For more details on matchmaker data, see Match Data. Matchmaker data is updated whenever new players are added during a successful backfill (see StartMatchBackfill).

    " }, "Location":{ "shape":"LocationStringModel", "documentation":"

    The fleet location where the game session is running. This value might specify the fleet's home Region or a remote location. Location is expressed as an Amazon Web Services Region code such as us-west-2.

    " } }, - "documentation":"

    Properties describing a game session.

    A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

    Once the session ends, the game session object is retained for 30 days. This means you can reuse idempotency token values after this time. Game session logs are retained for 14 days.

    All APIs by task

    " + "documentation":"

    Properties describing a game session.

    A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

    Amazon GameLift retains a game session resource for 30 days after the game session ends. You can reuse idempotency token values after this time. Game session logs are retained for 14 days.

    All APIs by task

    " }, "GameSessionActivationTimeoutSeconds":{ "type":"integer", @@ -5020,7 +5081,7 @@ }, "Status":{ "shape":"GameSessionPlacementState", - "documentation":"

    Current status of the game session placement request.

    • PENDING -- The placement request is currently in the queue waiting to be processed.

    • FULFILLED -- A new game session and player sessions (if requested) have been successfully created. Values for GameSessionArn and GameSessionRegion are available.

    • CANCELLED -- The placement request was canceled.

    • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed.

    • FAILED -- Amazon GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error.

    " + "documentation":"

    Current status of the game session placement request.

    • PENDING -- The placement request is in the queue waiting to be processed. Game session properties are not yet final.

    • FULFILLED -- A new game session has been successfully placed. Game session properties are now final.

    • CANCELLED -- The placement request was canceled.

    • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed.

    • FAILED -- Amazon GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error.

    " }, "GameProperties":{ "shape":"GamePropertyList", @@ -5036,15 +5097,15 @@ }, "GameSessionId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for the game session. This value is set once the new game session is placed (placement status is FULFILLED).

    " + "documentation":"

    A unique identifier for the game session. This value isn't final until placement status is FULFILLED.

    " }, "GameSessionArn":{ "shape":"NonZeroAndMaxString", - "documentation":"

    Identifier for the game session created by this placement request. This value is set once the new game session is placed (placement status is FULFILLED). This identifier is unique across all Regions. You can use this value as a GameSessionId value as needed.

    " + "documentation":"

    Identifier for the game session created by this placement request. This identifier is unique across all Regions. This value isn't final until placement status is FULFILLED.

    " }, "GameSessionRegion":{ "shape":"NonZeroAndMaxString", - "documentation":"

    Name of the Region where the game session created by this placement request is running. This value is set once the new game session is placed (placement status is FULFILLED).

    " + "documentation":"

    Name of the Region where the game session created by this placement request is running. This value isn't final until placement status is FULFILLED.

    " }, "PlayerLatencies":{ "shape":"PlayerLatencyList", @@ -5060,7 +5121,7 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

    " + "documentation":"

    The IP address of the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value isn't final until placement status is FULFILLED.

    " }, "DnsName":{ "shape":"DnsName", @@ -5068,11 +5129,11 @@ }, "Port":{ "shape":"PortNumber", - "documentation":"

    The port number for the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value is set once the new game session is placed (placement status is FULFILLED).

    " + "documentation":"

    The port number for the game session. To connect to an Amazon GameLift game server, an app needs both the IP address and port number. This value isn't final until placement status is FULFILLED.

    " }, "PlacedPlayerSessions":{ "shape":"PlacedPlayerSessionList", - "documentation":"

    A collection of information on player sessions created in response to the game session placement request. These player sessions are created only once a new game session is successfully placed (placement status is FULFILLED). This information includes the player ID (as provided in the placement request) and the corresponding player session ID.

    " + "documentation":"

    A collection of information on player sessions created in response to the game session placement request. These player sessions are created only after a new game session is successfully placed (placement status is FULFILLED). This information includes the player ID, provided in the placement request, and a corresponding player session ID.

    " }, "GameSessionData":{ "shape":"LargeGameSessionData", @@ -5083,7 +5144,7 @@ "documentation":"

    Information on the matchmaking process for this game. Data is in JSON syntax, formatted as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.

    " } }, - "documentation":"

    This object includes the full details of the original request plus the current status and start/end time stamps.

    " + "documentation":"

    Represents a potential game session placement, including the full details of the original placement request and the current status.

    If the game session placement status is PENDING, the properties for game session ID/ARN, region, IP address/DNS, and port aren't final. A game session is not active and ready to accept players until placement status reaches FULFILLED. When the placement is in PENDING status, Amazon GameLift may attempt to place a game session multiple times before succeeding. With each attempt it creates a GameSession object and updates this placement object with the new game session properties.

    " }, "GameSessionPlacementState":{ "type":"string", @@ -5200,11 +5261,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    A unique identifier for the fleet that the compute resource is registered to.

    " + "documentation":"

    A unique identifier for the fleet that contains the compute resource you want to connect to. You can use either the fleet ID or ARN value.

    " }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    The name of the compute resource you are requesting credentials for.

    " + "documentation":"

    A unique identifier for the compute resource that you want to connect to. You can use either a registered compute name or an instance ID.

    " } } }, @@ -5213,7 +5274,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    The fleet ID of compute resource.

    " + "documentation":"

    The ID of the fleet that contains the compute resource to be accessed.

    " }, "FleetArn":{ "shape":"FleetArn", @@ -5221,15 +5282,15 @@ }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    The name of the compute resource you requested credentials for.

    " + "documentation":"

    The identifier of the compute resource to be accessed. This value might be either a compute name or an instance ID.

    " }, "ComputeArn":{ "shape":"ComputeArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "Credentials":{ "shape":"AwsCredentials", - "documentation":"

    The access credentials for the compute resource.

    " + "documentation":"

    A set of temporary Amazon Web Services credentials for use when connecting to the compute resource with Amazon EC2 Systems Manager (SSM).

    " } } }, @@ -5263,19 +5324,19 @@ }, "ComputeName":{ "shape":"ComputeNameOrArn", - "documentation":"

    The name of the compute resource you are requesting the authentication token for.

    " + "documentation":"

    The name of the compute resource that the authentication token is issued to.

    " }, "ComputeArn":{ "shape":"ComputeArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "AuthToken":{ "shape":"ComputeAuthToken", - "documentation":"

    The authentication token that your game server uses to authenticate with Amazon GameLift.

    " + "documentation":"

    A valid temporary authentication token.

    " }, "ExpirationTimestamp":{ "shape":"Timestamp", - "documentation":"

    The amount of time until the authentication token is no longer valid. To continue using the compute resource for game server hosting, renew the authentication token by using this operation again.

    " + "documentation":"

    The amount of time until the authentication token is no longer valid.

    " } } }, @@ -5307,11 +5368,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    A unique identifier for the fleet that contains the instance you want access to. You can use either the fleet ID or ARN value. The fleet can be in any of the following statuses: ACTIVATING, ACTIVE, or ERROR. Fleets with an ERROR status may be accessible for a short time before they are deleted.

    " + "documentation":"

    A unique identifier for the fleet that contains the instance you want to access. You can request access to instances in EC2 fleets with the following statuses: ACTIVATING, ACTIVE, or ERROR. Use either a fleet ID or an ARN value.

    You can access fleets in ERROR status for a short period of time before Amazon GameLift deletes them.

    " }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

    A unique identifier for the instance you want to get access to. You can access an instance in any status.

    " + "documentation":"

    A unique identifier for the instance you want to access. You can access an instance in any status.

    " } } }, @@ -5349,7 +5410,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

    A unique identifier for the fleet that the instance is in.

    " + "documentation":"

    A unique identifier for the fleet that the instance belongs to.

    " }, "FleetArn":{ "shape":"FleetArn", @@ -5365,15 +5426,15 @@ }, "DnsName":{ "shape":"DnsName", - "documentation":"

    The DNS identifier assigned to the instance that is running the game session. Values have the following format:

    • TLS-enabled fleets: <unique identifier>.<region identifier>.amazongamelift.com.

    • Non-TLS-enabled fleets: ec2-<unique identifier>.compute.amazonaws.com. (See Amazon EC2 Instance IP Addressing.)

    When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

    " + "documentation":"

    The DNS identifier assigned to the instance that is running the game session. Values have the following format:

    When connecting to a game session that is running on a TLS-enabled fleet, you must use the DNS name, not the IP address.

    " }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

    Operating system that is running on this instance.

    " + "documentation":"

    Operating system that is running on this EC2 instance.

    " }, "Type":{ "shape":"EC2InstanceType", - "documentation":"

    Amazon EC2 instance type that defines the computing resources of this instance.

    " + "documentation":"

    EC2 instance type that defines the computing resources of this instance.

    " }, "Status":{ "shape":"InstanceStatus", @@ -5388,22 +5449,22 @@ "documentation":"

    The fleet location of the instance, expressed as an Amazon Web Services Region code, such as us-west-2.

    " } }, - "documentation":"

    Represents an EC2 instance of virtual computing resources that hosts one or more game servers. In Amazon GameLift, a fleet can contain zero or more instances.

    Related actions

    " + "documentation":"

    Represents a virtual computing instance that runs game server processes and hosts game sessions. In Amazon GameLift, one or more instances make up a managed EC2 fleet.

    " }, "InstanceAccess":{ "type":"structure", "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

    A unique identifier for the fleet containing the instance being accessed.

    " + "documentation":"

    A unique identifier for the fleet containing the instance to be accessed.

    " }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

    A unique identifier for the instance being accessed.

    " + "documentation":"

    A unique identifier for the instance to be accessed.

    " }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    IP address that is assigned to the instance.

    " + "documentation":"

    IP address assigned to the instance.

    " }, "OperatingSystem":{ "shape":"OperatingSystem", @@ -5411,24 +5472,24 @@ }, "Credentials":{ "shape":"InstanceCredentials", - "documentation":"

    Credentials required to access the instance.

    " + "documentation":"

    Security credentials that are required to access the instance.

    " } }, - "documentation":"

    Information required to remotely connect to a fleet instance.

    " + "documentation":"

    Information and credentials that you can use to remotely connect to an instance in an EC2 managed fleet. This data type is returned in response to a call to GetInstanceAccess.

    " }, "InstanceCredentials":{ "type":"structure", "members":{ "UserName":{ "shape":"NonEmptyString", - "documentation":"

    User login string.

    " + "documentation":"

    A user name for logging in.

    " }, "Secret":{ "shape":"NonEmptyString", - "documentation":"

    Secret string. For Windows instances, the secret is a password for use with Windows Remote Desktop. For Linux instances, it is a private key (which must be saved as a .pem file) for use with SSH.

    " + "documentation":"

    Secret string. For Windows instances, the secret is a password for use with Windows Remote Desktop. For Linux instances, it's a private key for use with SSH.

    " } }, - "documentation":"

    Set of credentials required to remotely access a fleet instance.

    ", + "documentation":"

    A set of credentials that allow remote access to an instance in an EC2 managed fleet. These credentials are returned in response to a call to GetInstanceAccess, which requests access for instances that are running game servers with the Amazon GameLift server SDK version 4.x or earlier.

    ", "sensitive":true }, "InstanceDefinition":{ @@ -5684,11 +5745,11 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    A unique identifier for the fleet the compute resources are registered to.

    " + "documentation":"

    A unique identifier for the fleet to retrieve compute resources for.

    " }, "Location":{ "shape":"LocationStringModel", - "documentation":"

    The name of the custom location that the compute resources are assigned to.

    " + "documentation":"

    The name of a location to retrieve compute resources for.

    " }, "Limit":{ "shape":"PositiveInteger", @@ -5705,7 +5766,7 @@ "members":{ "ComputeList":{ "shape":"ComputeList", - "documentation":"

    A list of compute resources registered to the fleet you specified.

    " + "documentation":"

    A list of compute resources in the specified fleet.

    " }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -6672,23 +6733,23 @@ }, "ComputeName":{ "shape":"ComputeName", - "documentation":"

    A descriptive label that is associated with the compute resource registered to your fleet.

    " + "documentation":"

    A descriptive label for the compute resource.

    " }, "CertificatePath":{ "shape":"NonZeroAndMaxString", - "documentation":"

    The path to the TLS certificate on your compute resource. The path and certificate are not validated by Amazon GameLift.

    " + "documentation":"

    The path to a TLS certificate on your compute resource. Amazon GameLift doesn't validate the path and certificate.

    " }, "DnsName":{ "shape":"DnsNameInput", - "documentation":"

    The DNS name of the compute resource. Amazon GameLift requires the DNS name or IP address to manage your compute resource.

    " + "documentation":"

    The DNS name of the compute resource. Amazon GameLift requires either a DNS name or IP address.

    " }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the compute resource. Amazon GameLift requires the DNS name or IP address to manage your compute resource.

    " + "documentation":"

    The IP address of the compute resource. Amazon GameLift requires either a DNS name or IP address.

    " }, "Location":{ "shape":"LocationStringModel", - "documentation":"

    The name of the custom location you added to the fleet you are registering this compute resource to.

    " + "documentation":"

    The name of a custom location to associate with the compute resource being registered.

    " } } }, @@ -6697,7 +6758,7 @@ "members":{ "Compute":{ "shape":"Compute", - "documentation":"

    The details of the compute resource you registered to the specified fleet.

    " + "documentation":"

    The details of the compute resource you registered.

    " } } }, @@ -7092,7 +7153,7 @@ "members":{ "LaunchPath":{ "shape":"LaunchPathStringModel", - "documentation":"

    The location of a game build executable or the Realtime script file that contains the Init() function. Game builds and Realtime scripts are installed on instances at the root:

    • Windows (custom game builds only): C:\\game. Example: \"C:\\game\\MyGame\\server.exe\"

    • Linux: /local/game. Examples: \"/local/game/MyGame/server.exe\" or \"/local/game/MyRealtimeScript.js\"

    " + "documentation":"

    The location of a game build executable or Realtime script. Game builds and Realtime scripts are installed on instances at the root:

    • Windows (custom game builds only): C:\\game. Example: \"C:\\game\\MyGame\\server.exe\"

    • Linux: /local/game. Examples: \"/local/game/MyGame/server.exe\" or \"/local/game/MyRealtimeScript.js\"

    Amazon GameLift doesn't support the use of setup scripts that launch the game executable. For custom game builds, this parameter must indicate the executable that calls the server SDK operations initSDK() and ProcessReady().

    " }, "Parameters":{ "shape":"LaunchParametersStringModel", @@ -7593,7 +7654,7 @@ }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

    The game session protection policy to apply to all new instances created in this fleet. Instances that already exist are not affected. You can set protection for individual instances using UpdateGameSession .

    • NoProtection -- The game session can be terminated during a scale-down event.

    • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

    " + "documentation":"

    The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession .

    • NoProtection -- The game session can be terminated during a scale-down event.

    • FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event.

    " }, "ResourceCreationLimitPolicy":{ "shape":"ResourceCreationLimitPolicy", @@ -7632,7 +7693,7 @@ }, "DesiredInstances":{ "shape":"WholeNumber", - "documentation":"

    The number of Amazon EC2 instances you want to maintain in the specified fleet location. This value must fall between the minimum and maximum size limits.

    " + "documentation":"

    The number of Amazon EC2 instances you want to maintain in the specified fleet location. This value must fall between the minimum and maximum size limits. Changes in desired instance value can take up to 1 minute to be reflected when viewing the fleet's capacity settings.

    " }, "MinSize":{ "shape":"WholeNumber", @@ -7752,7 +7813,7 @@ }, "UtilizationStatus":{ "shape":"GameServerUtilizationStatus", - "documentation":"

    Indicates whether the game server is available or is currently hosting gameplay.

    " + "documentation":"

    Indicates if the game server is available or is currently hosting gameplay. You can update a game server status from AVAILABLE to UTILIZED, but you can't change the status from UTILIZED to AVAILABLE.

    " }, "HealthCheck":{ "shape":"GameServerHealthCheck", diff --git a/services/gamesparks/pom.xml b/services/gamesparks/pom.xml index 8ad8c73c539..f411dba34bf 100644 --- a/services/gamesparks/pom.xml +++ b/services/gamesparks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT gamesparks AWS Java SDK :: Services :: Game Sparks diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index 3eb80c729a5..5f72c974054 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index b9a66177e18..4224f036375 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: Global Accelerator diff --git a/services/globalaccelerator/src/main/resources/codegen-resources/customization.config b/services/globalaccelerator/src/main/resources/codegen-resources/customization.config index f35548b15b2..52f3ddd37a7 100644 --- a/services/globalaccelerator/src/main/resources/codegen-resources/customization.config +++ b/services/globalaccelerator/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "listAccelerators" ], "defaultSimpleMethodTestRegion": "US_WEST_2", - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeAcceleratorAttributes" ] } diff --git a/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json b/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json index cb7a9c9eda5..d95f9dcbf71 100644 --- a/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json +++ 
b/services/globalaccelerator/src/main/resources/codegen-resources/service-2.json @@ -1948,7 +1948,7 @@ }, "ClientIPPreservationEnabled":{ "shape":"GenericBoolean", - "documentation":"

    Indicates whether client IP address preservation is enabled for an endpoint. The value is true or false. The default value is true for new accelerators.

    If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the endpoint fronted by the accelerator.

    Client IP address preservation is supported, in specific Amazon Web Services Regions, for endpoints that are Application Load Balancers and Amazon EC2 instances.

    For more information, see Preserve client IP addresses in Global Accelerator in the Global Accelerator Developer Guide.

    " + "documentation":"

    Indicates whether client IP address preservation is enabled for an endpoint. The value is true or false. The default value is true for new accelerators.

    If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the endpoint fronted by the accelerator.

    Client IP address preservation is supported, in specific Amazon Web Services Regions, for endpoints that are Application Load Balancers, Amazon EC2 instances, and Network Load Balancers with Security Groups. IMPORTANT: You cannot use client IP address preservation with Network Load Balancers with TLS listeners.

    For more information, see Preserve client IP addresses in Global Accelerator in the Global Accelerator Developer Guide.

    " } }, "documentation":"

    A complex type for endpoints. A resource must be valid and active when you add it as an endpoint.

    " @@ -1980,7 +1980,7 @@ }, "ClientIPPreservationEnabled":{ "shape":"GenericBoolean", - "documentation":"

    Indicates whether client IP address preservation is enabled for an endpoint. The value is true or false. The default value is true for new accelerators.

    If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the endpoint fronted by the accelerator.

    Client IP address preservation is supported, in specific Amazon Web Services Regions, for endpoints that are Application Load Balancers and Amazon EC2 instances.

    For more information, see Preserve client IP addresses in Global Accelerator in the Global Accelerator Developer Guide.

    " + "documentation":"

    Indicates whether client IP address preservation is enabled for an endpoint. The value is true or false. The default value is true for new accelerators.

    If the value is set to true, the client's IP address is preserved in the X-Forwarded-For request header as traffic travels to applications on the endpoint fronted by the accelerator.

    Client IP address preservation is supported, in specific Amazon Web Services Regions, for endpoints that are Application Load Balancers, Amazon EC2 instances, and Network Load Balancers with Security Groups. IMPORTANT: You cannot use client IP address preservation with Network Load Balancers with TLS listeners.

    For more information, see Preserve client IP addresses in Global Accelerator in the Global Accelerator Developer Guide.

    " } }, "documentation":"

    A complex type for an endpoint. Each endpoint group can include one or more endpoints, such as load balancers.

    " diff --git a/services/glue/pom.xml b/services/glue/pom.xml index b9957566262..4803ca1610d 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/customization.config b/services/glue/src/main/resources/codegen-resources/customization.config index 6abaf75b14b..e5d2b586984 100644 --- a/services/glue/src/main/resources/codegen-resources/customization.config +++ b/services/glue/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["*"] + "excludedSimpleMethods" : ["*"] } diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 5084c09af87..2ccbfb1c665 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -6562,6 +6562,10 @@ "CustomDatatypes":{ "shape":"CustomDatatypes", "documentation":"

    Creates a list of supported custom datatypes.

    " + }, + "Serde":{ + "shape":"CsvSerdeOption", + "documentation":"

    Sets the SerDe for processing CSV in the classifier, which will be applied in the Data Catalog. Valid values are OpenCSVSerDe, LazySimpleSerDe, and None. You can specify the None value when you want the crawler to do the detection.

    " } }, "documentation":"

    Specifies a custom CSV classifier for CreateClassifier to create.

    " @@ -7613,6 +7617,10 @@ "CustomDatatypes":{ "shape":"CustomDatatypes", "documentation":"

    A list of custom datatypes including \"BINARY\", \"BOOLEAN\", \"DATE\", \"DECIMAL\", \"DOUBLE\", \"FLOAT\", \"INT\", \"LONG\", \"SHORT\", \"STRING\", \"TIMESTAMP\".

    " + }, + "Serde":{ + "shape":"CsvSerdeOption", + "documentation":"

    Sets the SerDe for processing CSV in the classifier, which will be applied in the Data Catalog. Valid values are OpenCSVSerDe, LazySimpleSerDe, and None. You can specify the None value when you want the crawler to do the detection.

    " } }, "documentation":"

    A classifier for custom CSV content.

    " @@ -7641,6 +7649,14 @@ "min":1, "pattern":"[^\\r\\n]" }, + "CsvSerdeOption":{ + "type":"string", + "enum":[ + "OpenCSVSerDe", + "LazySimpleSerDe", + "None" + ] + }, "CustomCode":{ "type":"structure", "required":[ @@ -12772,6 +12788,10 @@ "documentation":"

    The same unique identifier was associated with two different records.

    ", "exception":true }, + "IdleTimeout":{ + "type":"integer", + "box":true + }, "IllegalBlueprintStateException":{ "type":"structure", "members":{ @@ -17795,6 +17815,30 @@ "GlueVersion":{ "shape":"GlueVersionString", "documentation":"

    The Glue version determines the versions of Apache Spark and Python that Glue supports. The GlueVersion must be greater than 2.0.

    " + }, + "NumberOfWorkers":{ + "shape":"NullableInteger", + "documentation":"

    The number of workers of a defined WorkerType to use for the session.

    " + }, + "WorkerType":{ + "shape":"WorkerType", + "documentation":"

    The type of predefined worker that is allocated when a session runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark sessions. Accepts the value Z.2X for Ray sessions.

    " + }, + "CompletedOn":{ + "shape":"TimestampValue", + "documentation":"

    The date and time that this session is completed.

    " + }, + "ExecutionTime":{ + "shape":"NullableDouble", + "documentation":"

    The total time the session ran for.

    " + }, + "DPUSeconds":{ + "shape":"NullableDouble", + "documentation":"

    The DPUs consumed by the session (formula: ExecutionTime * MaxCapacity).

    " + }, + "IdleTimeout":{ + "shape":"IdleTimeout", + "documentation":"

    The number of minutes when idle before the session times out.

    " } }, "documentation":"

    The period in which a remote Spark runtime environment is running.

    " @@ -20074,6 +20118,10 @@ "CustomDatatypes":{ "shape":"CustomDatatypes", "documentation":"

    Specifies a list of supported custom datatypes.

    " + }, + "Serde":{ + "shape":"CsvSerdeOption", + "documentation":"

    Sets the SerDe for processing CSV in the classifier, which will be applied in the Data Catalog. Valid values are OpenCSVSerDe, LazySimpleSerDe, and None. You can specify the None value when you want the crawler to do the detection.

    " } }, "documentation":"

    Specifies a custom CSV classifier to be updated.

    " diff --git a/services/grafana/pom.xml b/services/grafana/pom.xml index 0483115834c..7abf38d84e5 100644 --- a/services/grafana/pom.xml +++ b/services/grafana/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT grafana AWS Java SDK :: Services :: Grafana diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index 16e9e3af33a..00a82c28ced 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrass/src/main/resources/codegen-resources/customization.config b/services/greengrass/src/main/resources/codegen-resources/customization.config index 431c9590330..85d0fe29ba9 100644 --- a/services/greengrass/src/main/resources/codegen-resources/customization.config +++ b/services/greengrass/src/main/resources/codegen-resources/customization.config @@ -12,7 +12,7 @@ "listResourceDefinitions", "listSubscriptionDefinitions" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "associateServiceRoleToAccount", "createCoreDefinition", "createConnectorDefinition", diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index c3416ac7364..e2efb9640d8 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index ad3fe417bf3..09861a45efc 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index f6278bcd72a..638ec4653d2 100644 
--- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 guardduty diff --git a/services/guardduty/src/main/resources/codegen-resources/customization.config b/services/guardduty/src/main/resources/codegen-resources/customization.config index 85c8c09b6c4..3d541f6b5b2 100644 --- a/services/guardduty/src/main/resources/codegen-resources/customization.config +++ b/services/guardduty/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "listDetectors", "listInvitations" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createDetector", "declineInvitations", "deleteInvitations" diff --git a/services/health/pom.xml b/services/health/pom.xml index 95ef8e2589c..f1071367848 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index 1ec3aa9d834..9f24898b743 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/honeycode/pom.xml b/services/honeycode/pom.xml index 770e789c2e2..25bb5ebd1bd 100644 --- a/services/honeycode/pom.xml +++ b/services/honeycode/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT honeycode AWS Java SDK :: Services :: Honeycode diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 45cee3318ce..b9b0d9ee945 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git 
a/services/iam/src/main/resources/codegen-resources/customization.config b/services/iam/src/main/resources/codegen-resources/customization.config index 0f65ae3b4f9..78b7c2df70e 100644 --- a/services/iam/src/main/resources/codegen-resources/customization.config +++ b/services/iam/src/main/resources/codegen-resources/customization.config @@ -26,7 +26,7 @@ "getAccountAuthorizationDetails", "listPolicies" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "updateAccountPasswordPolicy" ], "excludeClientCreateMethod": true, diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index 0d21860fc98..7759f32bb6f 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index b42888fbf85..7e83ac278cb 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 8c6b51f1701..a8f7a380fe1 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/inspector2/pom.xml b/services/inspector2/pom.xml index 237e7b59e57..e17731e327e 100644 --- a/services/inspector2/pom.xml +++ b/services/inspector2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT inspector2 AWS Java SDK :: Services :: Inspector2 diff --git a/services/internetmonitor/pom.xml b/services/internetmonitor/pom.xml index 83fe357ad22..1194fa51ad9 100644 --- 
a/services/internetmonitor/pom.xml +++ b/services/internetmonitor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT internetmonitor AWS Java SDK :: Services :: Internet Monitor diff --git a/services/iot/pom.xml b/services/iot/pom.xml index 49a840587a1..430642850a0 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot/src/main/resources/codegen-resources/customization.config b/services/iot/src/main/resources/codegen-resources/customization.config index 9f910afc97f..bc16e09001e 100644 --- a/services/iot/src/main/resources/codegen-resources/customization.config +++ b/services/iot/src/main/resources/codegen-resources/customization.config @@ -30,7 +30,7 @@ "listThings", "listTopicRules" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "addThingToThingGroup", "removeThingFromThingGroup", "setV2LoggingOptions", diff --git a/services/iot1clickdevices/pom.xml b/services/iot1clickdevices/pom.xml index d7fef23a201..569de2ee1a5 100644 --- a/services/iot1clickdevices/pom.xml +++ b/services/iot1clickdevices/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iot1clickdevices AWS Java SDK :: Services :: IoT 1Click Devices Service diff --git a/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config b/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config index 0bed30b4797..b947f5dbc95 100644 --- a/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config +++ b/services/iot1clickdevices/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listDevices" ] } diff --git a/services/iot1clickprojects/pom.xml b/services/iot1clickprojects/pom.xml index 
be8cd289fa6..faf2a3ea85d 100644 --- a/services/iot1clickprojects/pom.xml +++ b/services/iot1clickprojects/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iot1clickprojects AWS Java SDK :: Services :: IoT 1Click Projects diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index d4917204760..8c324d694d8 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotanalytics/src/main/resources/codegen-resources/customization.config b/services/iotanalytics/src/main/resources/codegen-resources/customization.config index 5837f8c071c..ba5ea64efd7 100644 --- a/services/iotanalytics/src/main/resources/codegen-resources/customization.config +++ b/services/iotanalytics/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ "listDatastores", "listPipelines" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeLoggingOptions" ], "shapeModifiers": { diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index 3907df5f297..3cd9364f2c7 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotdataplane AWS Java SDK :: Services :: AWS IoT Data Plane diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index d9329f3b343..4aee4b66646 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotdeviceadvisor AWS Java SDK :: Services :: Iot Device Advisor diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index fd0fb0d0722..4e36a790031 100644 --- a/services/iotevents/pom.xml +++ 
b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index adba4e284f7..3b83a0e13d5 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index 809a9f85c9a..5a2ca8d8148 100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotfleethub AWS Java SDK :: Services :: Io T Fleet Hub diff --git a/services/iotfleetwise/pom.xml b/services/iotfleetwise/pom.xml index 7b12421d9a4..642c4a1a580 100644 --- a/services/iotfleetwise/pom.xml +++ b/services/iotfleetwise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotfleetwise AWS Java SDK :: Services :: Io T Fleet Wise diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index f5e4f9dbb0a..e3734000043 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotroborunner/pom.xml b/services/iotroborunner/pom.xml index 1dab7a56f86..4540ecfca25 100644 --- a/services/iotroborunner/pom.xml +++ b/services/iotroborunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotroborunner AWS Java SDK :: Services :: IoT Robo Runner diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index 8dbff726202..91fd0ea61ae 100644 --- 
a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index 98c61b98f5e..1ef9a6fbb2a 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index 68abfb48dfa..30368401cf7 100644 --- a/services/iotthingsgraph/pom.xml +++ b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iottwinmaker/pom.xml b/services/iottwinmaker/pom.xml index 832a73a6296..89a45b8d16f 100644 --- a/services/iottwinmaker/pom.xml +++ b/services/iottwinmaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iottwinmaker AWS Java SDK :: Services :: Io T Twin Maker diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index 6d9977ddbda..313c9d43f8b 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index c7c8008a62c..d0903decbdb 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff --git a/services/ivschat/pom.xml b/services/ivschat/pom.xml index 654e44d2c1a..8324eea2faf 100644 --- a/services/ivschat/pom.xml +++ b/services/ivschat/pom.xml 
@@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ivschat AWS Java SDK :: Services :: Ivschat diff --git a/services/ivsrealtime/pom.xml b/services/ivsrealtime/pom.xml index 655c74f7324..45c4829d0ce 100644 --- a/services/ivsrealtime/pom.xml +++ b/services/ivsrealtime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ivsrealtime AWS Java SDK :: Services :: IVS Real Time diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 81207b99fe4..e880284edf2 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafkaconnect/pom.xml b/services/kafkaconnect/pom.xml index 1ca2ce35079..7f7a2a32fd2 100644 --- a/services/kafkaconnect/pom.xml +++ b/services/kafkaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kafkaconnect AWS Java SDK :: Services :: Kafka Connect diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index b73589c87be..250c6767509 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kendra AWS Java SDK :: Services :: Kendra diff --git a/services/kendraranking/pom.xml b/services/kendraranking/pom.xml index f20d54b2bbd..7ac87207205 100644 --- a/services/kendraranking/pom.xml +++ b/services/kendraranking/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kendraranking AWS Java SDK :: Services :: Kendra Ranking diff --git a/services/keyspaces/pom.xml b/services/keyspaces/pom.xml index 4d16be2e20c..4b601e824c4 100644 --- a/services/keyspaces/pom.xml +++ b/services/keyspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT keyspaces AWS Java SDK :: 
Services :: Keyspaces diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 9dc90d1215c..8a8f1eec6d9 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesis/src/main/resources/codegen-resources/customization.config b/services/kinesis/src/main/resources/codegen-resources/customization.config index fdf66d583a8..caeb1f7c650 100644 --- a/services/kinesis/src/main/resources/codegen-resources/customization.config +++ b/services/kinesis/src/main/resources/codegen-resources/customization.config @@ -6,7 +6,7 @@ "customServiceMetadata": { "protocol": "cbor" }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deregisterStreamConsumer", "describeStreamConsumer", "listShards" diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index e76049d9645..5ed8924972c 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config b/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config index 282a5331893..d1bf750d26b 100644 --- a/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "listApplications" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "discoverInputSchema" ] } diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index 702a9f6c54b..091899748d7 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ 
b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index 296f355b1af..79b582f6540 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 kinesisvideo diff --git a/services/kinesisvideo/src/main/resources/codegen-resources/customization.config b/services/kinesisvideo/src/main/resources/codegen-resources/customization.config index becde5e5fc8..e0972ef57fe 100644 --- a/services/kinesisvideo/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideo/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "listStreams" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "listTagsForStream", "describeStream" ] diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index cb479896aee..b968a021f89 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config index 97db0dd1e81..232ca942ec4 100644 --- a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "getHLSStreamingSessionURL" ] } 
diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index 55832fdd3b6..45069286357 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index 8756eafe475..8c7e7becbb7 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kinesisvideowebrtcstorage/pom.xml b/services/kinesisvideowebrtcstorage/pom.xml index ef9939dacb9..a56e46f6335 100644 --- a/services/kinesisvideowebrtcstorage/pom.xml +++ b/services/kinesisvideowebrtcstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kinesisvideowebrtcstorage AWS Java SDK :: Services :: Kinesis Video Web RTC Storage diff --git a/services/kms/pom.xml b/services/kms/pom.xml index 5aec2c2d6cc..95b8ef058de 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index bd50a703a6b..8692f073cb2 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index a474099293a..de6446f803c 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index 83d194f193e..d0ca65a7e0c 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index da511654ac9..24550405f39 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 39dabf8b57c..5fb6d567373 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": 
[ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this 
partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://models-v2-lex-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://models-v2-lex.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://models-v2-lex.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://models-v2-lex.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://models-v2-lex.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json index fa71d1cade5..756675f3c8e 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json @@ -13666,7 +13666,7 @@ "type":"integer", "box":true, "max":3, - "min":1 + "min":0 } }, "documentation":"

    " diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index 67b37cfdd20..a9c8ea8241e 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index b3912070dad..3c1a7864f31 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index d163efe6f1a..5c8c44a17c7 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/licensemanager/src/main/resources/codegen-resources/customization.config b/services/licensemanager/src/main/resources/codegen-resources/customization.config index b98b012071c..cb1971ffe74 100644 --- a/services/licensemanager/src/main/resources/codegen-resources/customization.config +++ b/services/licensemanager/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "getServiceSettings", "listLicenseConfigurations" ], - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "getServiceSettings", "listLicenseConfigurations", "listResourceInventory" diff --git a/services/licensemanagerlinuxsubscriptions/pom.xml b/services/licensemanagerlinuxsubscriptions/pom.xml index c0ddca90edb..405d3c0f3dd 100644 --- a/services/licensemanagerlinuxsubscriptions/pom.xml +++ b/services/licensemanagerlinuxsubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT licensemanagerlinuxsubscriptions AWS 
Java SDK :: Services :: License Manager Linux Subscriptions diff --git a/services/licensemanagerusersubscriptions/pom.xml b/services/licensemanagerusersubscriptions/pom.xml index e34ed4a7743..04005dc5623 100644 --- a/services/licensemanagerusersubscriptions/pom.xml +++ b/services/licensemanagerusersubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT licensemanagerusersubscriptions AWS Java SDK :: Services :: License Manager User Subscriptions diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index 53095b2a665..2e3016e278c 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/location/pom.xml b/services/location/pom.xml index 4001be5f9ed..438f0d95de1 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT location AWS Java SDK :: Services :: Location diff --git a/services/lookoutequipment/pom.xml b/services/lookoutequipment/pom.xml index 381a2b63c8d..f98a1ec79e8 100644 --- a/services/lookoutequipment/pom.xml +++ b/services/lookoutequipment/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lookoutequipment AWS Java SDK :: Services :: Lookout Equipment diff --git a/services/lookoutmetrics/pom.xml b/services/lookoutmetrics/pom.xml index 7eadcb29a09..ce65be973ec 100644 --- a/services/lookoutmetrics/pom.xml +++ b/services/lookoutmetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lookoutmetrics AWS Java SDK :: Services :: Lookout Metrics diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index 44f0a16a00e..e83eeb85769 100644 --- a/services/lookoutvision/pom.xml +++ 
b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/m2/pom.xml b/services/m2/pom.xml index 4e595cf5fa0..5ebefd4f355 100644 --- a/services/m2/pom.xml +++ b/services/m2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT m2 AWS Java SDK :: Services :: M2 diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index e21c1a8a41d..3f39960e275 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/macie/pom.xml b/services/macie/pom.xml index dfab482f65d..739707de376 100644 --- a/services/macie/pom.xml +++ b/services/macie/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT macie AWS Java SDK :: Services :: Macie diff --git a/services/macie/src/main/resources/codegen-resources/customization.config b/services/macie/src/main/resources/codegen-resources/customization.config index 15a246df948..c0e92ca2cb1 100644 --- a/services/macie/src/main/resources/codegen-resources/customization.config +++ b/services/macie/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listMemberAccounts", "listS3Resources" ] diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml index 91375a5dd68..559ec70d4d6 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index 1d235f59ab8..b792895d889 100644 --- 
a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/managedblockchainquery/pom.xml b/services/managedblockchainquery/pom.xml index b50a8479cf9..28c237f29ef 100644 --- a/services/managedblockchainquery/pom.xml +++ b/services/managedblockchainquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT managedblockchainquery AWS Java SDK :: Services :: Managed Blockchain Query diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index 56bf88cc674..6e60f13648b 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index 0a5d01b6bb2..e2b8bb0a12a 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index 20b908726c8..a25f2dc5aa3 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index 8711e7594cc..ac0af119913 100644 --- a/services/marketplacemetering/pom.xml +++ 
b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index b4ebc5183db..b9243e680a0 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index e3cfe40b7a4..7f9547feba9 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/customization.config b/services/mediaconvert/src/main/resources/codegen-resources/customization.config index 6abaf75b14b..e5d2b586984 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/customization.config +++ b/services/mediaconvert/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["*"] + "excludedSimpleMethods" : ["*"] } diff --git a/services/mediaconvert/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/mediaconvert/src/main/resources/codegen-resources/endpoint-rule-set.json index dcc0061568b..b3cfd3d2ad8 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: 
Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,175 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + 
}, + "supportsFIPS" ] } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - "aws-us-gov", - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - } - ] - } - ], - "endpoint": { - "url": "https://mediaconvert.{Region}.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "ref": "PartitionResult" }, - { - "conditions": [], - "endpoint": { - "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -304,91 +225,134 @@ { 
"conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-us-gov", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediaconvert.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://mediaconvert.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://mediaconvert-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { "conditions": [], - "type": "tree", - "rules": [ + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "ref": "Region" + "ref": "PartitionResult" }, - "cn-northwest-1" + "supportsDualStack" ] } - ], - "endpoint": { - "url": "https://subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [], "endpoint": { - "url": "https://mediaconvert.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://mediaconvert.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + 
"type": "error" } ] + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "cn-northwest-1" + ] + } + ], + "endpoint": { + "url": "https://subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://mediaconvert.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index c6ee5fd641d..1cd4efb6ac8 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -543,7 +543,7 @@ "documentation": "The service couldn't complete your request because there is a conflict with the current state of the resource." } ], - "documentation": "Retrieve the JSON for a specific completed transcoding job." + "documentation": "Retrieve the JSON for a specific transcoding job." }, "GetJobTemplate": { "name": "GetJobTemplate", @@ -1617,7 +1617,20 @@ "TCS", "VHL", "VHC", - "VHR" + "VHR", + "TBL", + "TBC", + "TBR", + "RSL", + "RSR", + "LW", + "RW", + "LFE2", + "LT", + "RT", + "HI", + "NAR", + "M" ] }, "AudioChannelTaggingSettings": { @@ -1645,7 +1658,8 @@ "EAC3_ATMOS", "VORBIS", "OPUS", - "PASSTHROUGH" + "PASSTHROUGH", + "FLAC" ] }, "AudioCodecSettings": { @@ -1681,6 +1695,11 @@ "locationName": "eac3Settings", "documentation": "Required when you set Codec to the value EAC3." 
}, + "FlacSettings": { + "shape": "FlacSettings", + "locationName": "flacSettings", + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value FLAC." + }, "Mp2Settings": { "shape": "Mp2Settings", "locationName": "mp2Settings", @@ -2003,7 +2022,7 @@ "MaxAbrBitrate": { "shape": "__integerMin100000Max100000000", "locationName": "maxAbrBitrate", - "documentation": "Optional. The maximum target bit rate used in your automated ABR stack. Use this value to set an upper limit on the bandwidth consumed by the highest-quality rendition. This is the rendition that is delivered to viewers with the fastest internet connections. If you don't specify a value, MediaConvert uses 8,000,000 (8 mb/s) by default." + "documentation": "Specify the maximum average bitrate for MediaConvert to use in your automated ABR stack. If you don't specify a value, MediaConvert uses 8,000,000 (8 mb/s) by default. The average bitrate of your highest-quality rendition will be equal to or below this value, depending on the quality, complexity, and resolution of your content. Note that the instantaneous maximum bitrate may vary above the value that you specify." }, "MaxRenditions": { "shape": "__integerMin3Max15", @@ -2013,7 +2032,7 @@ "MinAbrBitrate": { "shape": "__integerMin100000Max100000000", "locationName": "minAbrBitrate", - "documentation": "Optional. The minimum target bitrate used in your automated ABR stack. Use this value to set a lower limit on the bitrate of video delivered to viewers with slow internet connections. If you don't specify a value, MediaConvert uses 600,000 (600 kb/s) by default." + "documentation": "Specify the minimum average bitrate for MediaConvert to use in your automated ABR stack. If you don't specify a value, MediaConvert uses 600,000 (600 kb/s) by default. The average bitrate of your lowest-quality rendition will be near this value. Note that the instantaneous minimum bitrate may vary below the value that you specify." 
}, "Rules": { "shape": "__listOfAutomatedAbrRule", @@ -2054,6 +2073,14 @@ "BIT_10" ] }, + "Av1FilmGrainSynthesis": { + "type": "string", + "documentation": "Film grain synthesis replaces film grain present in your content with similar quality synthesized AV1 film grain. We recommend that you choose Enabled to reduce the bandwidth of your QVBR quality level 5, 6, 7, or 8 outputs. For QVBR quality level 9 or 10 outputs we recommend that you keep the default value, Disabled. When you include Film grain synthesis, you cannot include the Noise reducer preprocessor.", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "Av1FramerateControl": { "type": "string", "documentation": "Use the Framerate setting to specify the frame rate for this output. If you want to keep the same frame rate as the input video, choose Follow source. If you want to do frame rate conversion, choose a frame rate from the dropdown list or choose Custom. The framerates shown in the dropdown list are decimal approximations of fractions. If you choose Custom, specify your frame rate as a fraction.", @@ -2107,6 +2134,11 @@ "locationName": "bitDepth", "documentation": "Specify the Bit depth. You can choose 8-bit or 10-bit." }, + "FilmGrainSynthesis": { + "shape": "Av1FilmGrainSynthesis", + "locationName": "filmGrainSynthesis", + "documentation": "Film grain synthesis replaces film grain present in your content with similar quality synthesized AV1 film grain. We recommend that you choose Enabled to reduce the bandwidth of your QVBR quality level 5, 6, 7, or 8 outputs. For QVBR quality level 9 or 10 outputs we recommend that you keep the default value, Disabled. When you include Film grain synthesis, you cannot include the Noise reducer preprocessor." 
+ }, "FramerateControl": { "shape": "Av1FramerateControl", "locationName": "framerateControl", @@ -5410,6 +5442,27 @@ "MILLISECONDS" ] }, + "FlacSettings": { + "type": "structure", + "members": { + "BitDepth": { + "shape": "__integerMin16Max24", + "locationName": "bitDepth", + "documentation": "Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track." + }, + "Channels": { + "shape": "__integerMin1Max8", + "locationName": "channels", + "documentation": "Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are between 1 and 8." + }, + "SampleRate": { + "shape": "__integerMin22050Max48000", + "locationName": "sampleRate", + "documentation": "Sample rate in hz." + } + }, + "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value FLAC." + }, "FontScript": { "type": "string", "documentation": "Provide the font script, using an ISO 15924 script code, if the LanguageCode is not sufficient for determining the script type. Where LanguageCode or CustomLanguageCode is sufficient, use \"AUTOMATIC\" or leave unset.", @@ -8868,6 +8921,16 @@ "locationName": "programNumber", "documentation": "Use Program number to specify the program number used in the program map table (PMT) for this output. Default is 1. Program numbers and program map tables are parts of MPEG-2 transport stream containers, used for organizing data." }, + "PtsOffset": { + "shape": "__integerMin0Max3600", + "locationName": "ptsOffset", + "documentation": "Manually specify the initial PTS offset, in seconds, when you set PTS offset to Seconds. Enter an integer from 0 to 3600. Leave blank to keep the default value 2." + }, + "PtsOffsetMode": { + "shape": "TsPtsOffset", + "locationName": "ptsOffsetMode", + "documentation": "Specify the initial presentation timestamp (PTS) offset for your transport stream output. 
To let MediaConvert automatically determine the initial PTS offset: Keep the default value, Auto. We recommend that you choose Auto for the widest player compatibility. The initial PTS will be at least two seconds and vary depending on your output's bitrate, HRD buffer size and HRD buffer initial fill percentage. To manually specify an initial PTS offset: Choose Seconds. Then specify the number of seconds with PTS offset." + }, "RateMode": { "shape": "M2tsRateMode", "locationName": "rateMode", @@ -9029,6 +9092,16 @@ "locationName": "programNumber", "documentation": "The value of the program number field in the Program Map Table." }, + "PtsOffset": { + "shape": "__integerMin0Max3600", + "locationName": "ptsOffset", + "documentation": "Manually specify the initial PTS offset, in seconds, when you set PTS offset to Seconds. Enter an integer from 0 to 3600. Leave blank to keep the default value 2." + }, + "PtsOffsetMode": { + "shape": "TsPtsOffset", + "locationName": "ptsOffsetMode", + "documentation": "Specify the initial presentation timestamp (PTS) offset for your transport stream output. To let MediaConvert automatically determine the initial PTS offset: Keep the default value, Auto. We recommend that you choose Auto for the widest player compatibility. The initial PTS will be at least two seconds and vary depending on your output's bitrate, HRD buffer size and HRD buffer initial fill percentage. To manually specify an initial PTS offset: Choose Seconds. Then specify the number of seconds with PTS offset." + }, "Scte35Pid": { "shape": "__integerMin32Max8182", "locationName": "scte35Pid", @@ -11078,6 +11151,11 @@ "shape": "S3EncryptionSettings", "locationName": "encryption", "documentation": "Settings for how your job outputs are encrypted as they are uploaded to Amazon S3." + }, + "StorageClass": { + "shape": "S3StorageClass", + "locationName": "storageClass", + "documentation": "Specify the S3 storage class to use for this destination." 
} }, "documentation": "Settings associated with S3 destination" @@ -11121,6 +11199,19 @@ "SERVER_SIDE_ENCRYPTION_KMS" ] }, + "S3StorageClass": { + "type": "string", + "documentation": "Specify the S3 storage class to use for this destination.", + "enum": [ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE" + ] + }, "SampleRangeConversion": { "type": "string", "documentation": "Specify how MediaConvert limits the color sample range for this output. To create a limited range output from a full range input: Choose Limited range squeeze. For full range inputs, MediaConvert performs a linear offset to color samples equally across all pixels and frames. Color samples in 10-bit outputs are limited to 64 through 940, and 8-bit outputs are limited to 16 through 235. Note: For limited range inputs, values for color samples are passed through to your output unchanged. MediaConvert does not limit the sample range. To correct pixels in your input that are out of range or out of gamut: Choose Limited range clip. Use for broadcast applications. MediaConvert conforms any pixels outside of the values that you specify under Minimum YUV and Maximum YUV to limited range bounds. MediaConvert also corrects any YUV values that, when converted to RGB, would be outside the bounds you specify under Minimum RGB tolerance and Maximum RGB tolerance. With either limited range conversion, MediaConvert writes the sample range metadata in the output.", @@ -11489,6 +11580,14 @@ }, "documentation": "Settings specific to caption sources that are specified by track number. Currently, this is only IMSC captions in an IMF package. If your caption source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of TrackSourceSettings." }, + "TsPtsOffset": { + "type": "string", + "documentation": "Specify the initial presentation timestamp (PTS) offset for your transport stream output. 
To let MediaConvert automatically determine the initial PTS offset: Keep the default value, Auto. We recommend that you choose Auto for the widest player compatibility. The initial PTS will be at least two seconds and vary depending on your output's bitrate, HRD buffer size and HRD buffer initial fill percentage. To manually specify an initial PTS offset: Choose Seconds. Then specify the number of seconds with PTS offset.", + "enum": [ + "AUTO", + "SECONDS" + ] + }, "TtmlDestinationSettings": { "type": "structure", "members": { @@ -13193,6 +13292,11 @@ "min": 1, "max": 64 }, + "__integerMin1Max8": { + "type": "integer", + "min": 1, + "max": 8 + }, "__integerMin22050Max48000": { "type": "integer", "min": 22050, diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index c2b7f194f31..6cf6322fe79 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 medialive diff --git a/services/medialive/src/main/resources/codegen-resources/customization.config b/services/medialive/src/main/resources/codegen-resources/customization.config index 4f122f084d3..2b6d3476b2d 100644 --- a/services/medialive/src/main/resources/codegen-resources/customization.config +++ b/services/medialive/src/main/resources/codegen-resources/customization.config @@ -6,7 +6,7 @@ "listOfferings", "listReservations" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createChannel", "createInput", "createInputSecurityGroup" diff --git a/services/medialive/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/medialive/src/main/resources/codegen-resources/endpoint-rule-set.json index a354a5ca273..93e47c55b38 100644 --- a/services/medialive/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/medialive/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": 
"tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } 
- ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://medialive-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://medialive-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://medialive-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://medialive-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this 
partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://medialive.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://medialive.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://medialive.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://medialive.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/medialive/src/main/resources/codegen-resources/service-2.json 
b/services/medialive/src/main/resources/codegen-resources/service-2.json index 12e6a1f6179..26abe258a32 100644 --- a/services/medialive/src/main/resources/codegen-resources/service-2.json +++ b/services/medialive/src/main/resources/codegen-resources/service-2.json @@ -3031,6 +3031,14 @@ "MEDIUM_LOW" ] }, + "Ac3AttenuationControl": { + "type": "string", + "documentation": "Ac3 Attenuation Control", + "enum": [ + "ATTENUATE_3_DB", + "NONE" + ] + }, "Ac3BitstreamMode": { "type": "string", "documentation": "Ac3 Bitstream Mode", @@ -3116,6 +3124,11 @@ "shape": "Ac3MetadataControl", "locationName": "metadataControl", "documentation": "When set to \"followInput\", encoder metadata will be sourced from the DD, DD+, or DolbyE decoder that supplied this audio data. If audio was not supplied from one of these streams, then the static metadata settings will be used." + }, + "AttenuationControl": { + "shape": "Ac3AttenuationControl", + "locationName": "attenuationControl", + "documentation": "Applies a 3 dB attenuation to the surround channels. Applies only when the coding mode parameter is CODING_MODE_3_2_LFE." } }, "documentation": "Ac3 Settings" @@ -9491,6 +9504,15 @@ }, "documentation": "Settings to configure an action so that it occurs as soon as possible." }, + "IncludeFillerNalUnits": { + "type": "string", + "documentation": "Include Filler Nal Units", + "enum": [ + "AUTO", + "DROP", + "INCLUDE" + ] + }, "Input": { "type": "structure", "members": { @@ -11654,6 +11676,14 @@ "PASSTHROUGH" ] }, + "M3u8KlvBehavior": { + "type": "string", + "documentation": "M3u8 Klv Behavior", + "enum": [ + "NO_PASSTHROUGH", + "PASSTHROUGH" + ] + }, "M3u8NielsenId3Behavior": { "type": "string", "documentation": "M3u8 Nielsen Id3 Behavior", @@ -11765,6 +11795,16 @@ "shape": "__string", "locationName": "videoPid", "documentation": "Packet Identifier (PID) of the elementary video stream in the transport stream. Can be entered as a decimal or hexadecimal value." 
+ }, + "KlvBehavior": { + "shape": "M3u8KlvBehavior", + "locationName": "klvBehavior", + "documentation": "If set to passthrough, passes any KLV data from the input source to this output." + }, + "KlvDataPids": { + "shape": "__string", + "locationName": "klvDataPids", + "documentation": "Packet Identifier (PID) for input source KLV data to this output. Multiple values are accepted, and can be entered in ranges and/or by comma separation. Can be entered as decimal or hexadecimal values. Each PID specified must be in the range of 32 (or 0x20)..8182 (or 0x1ff6)." } }, "documentation": "Settings information for the .m3u8 container" @@ -13819,6 +13859,11 @@ "shape": "__integerMin0", "locationName": "restartDelay", "documentation": "If a streaming output fails, number of seconds to wait until a restart is initiated. A value of 0 means never restart." + }, + "IncludeFillerNalUnits": { + "shape": "IncludeFillerNalUnits", + "locationName": "includeFillerNalUnits", + "documentation": "Applies only when the rate control mode (in the codec settings) is CBR (constant bit rate). Controls whether the RTMP output stream is padded (with FILL NAL units) in order to achieve a constant bit rate that is truly constant. When there is no padding, the bandwidth varies (up to the bitrate value in the codec settings). We recommend that you choose Auto." 
} }, "documentation": "Rtmp Group Settings" diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index 140674caa34..f6e509bab15 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackage/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/mediapackage/src/main/resources/codegen-resources/endpoint-rule-set.json index 471664d7912..94f09fea26e 100644 --- a/services/mediapackage/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/mediapackage/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": 
"UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediapackage-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://mediapackage-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", 
"argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://mediapackage-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://mediapackage-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://mediapackage.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": 
"https://mediapackage.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://mediapackage.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://mediapackage.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/mediapackage/src/main/resources/codegen-resources/endpoint-tests.json b/services/mediapackage/src/main/resources/codegen-resources/endpoint-tests.json index 89d130745a4..9d863247b5a 100644 --- a/services/mediapackage/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/mediapackage/src/main/resources/codegen-resources/endpoint-tests.json @@ -351,6 +351,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -364,6 +375,28 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", 
+ "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -377,6 +410,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -440,6 +484,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/mediapackage/src/main/resources/codegen-resources/service-2.json b/services/mediapackage/src/main/resources/codegen-resources/service-2.json index f591b0e51dc..65c3f2e0081 100644 --- a/services/mediapackage/src/main/resources/codegen-resources/service-2.json +++ b/services/mediapackage/src/main/resources/codegen-resources/service-2.json @@ -1983,7 +1983,7 @@ "Password": { "documentation": "The system generated password for ingest authentication.", "locationName": "password", - "shape": "__string" + "shape": "SensitiveString" }, "Url": { "documentation": "The ingest URL to which the source stream should be sent.", @@ -1993,7 +1993,7 @@ "Username": { "documentation": "The system generated username for ingest authentication.", "locationName": "username", - "shape": "__string" + "shape": "SensitiveString" } }, "type": "structure" @@ -2655,6 +2655,10 @@ ], "type": "string" }, + "SensitiveString": { + "sensitive": true, + "type": "string" + }, "ServiceUnavailableException": { "documentation": "An 
unexpected error occurred.", "error": { diff --git a/services/mediapackagev2/pom.xml b/services/mediapackagev2/pom.xml index 16cdfacabf5..2e893f11d22 100644 --- a/services/mediapackagev2/pom.xml +++ b/services/mediapackagev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT mediapackagev2 AWS Java SDK :: Services :: Media Package V2 diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index 9647ed42f00..27ace717581 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index ba5ffed496c..3b49c3c96ba 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastore/src/main/resources/codegen-resources/customization.config b/services/mediastore/src/main/resources/codegen-resources/customization.config index cc404c84d6c..933304e140c 100644 --- a/services/mediastore/src/main/resources/codegen-resources/customization.config +++ b/services/mediastore/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "listContainers" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeContainer" ] } diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index ca5c667ee50..1eaee628e4f 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 mediastoredata @@ -74,11 +74,5 @@ commons-lang3 test - - software.amazon.awssdk - mediastore - ${awsjavasdk.version} - test - diff --git 
a/services/mediastoredata/src/main/resources/codegen-resources/customization.config b/services/mediastoredata/src/main/resources/codegen-resources/customization.config index 18dafbef2ba..a5746334a29 100644 --- a/services/mediastoredata/src/main/resources/codegen-resources/customization.config +++ b/services/mediastoredata/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listItems" ] } diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index ba3f32adef0..b2c9255e5f3 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/mediatailor/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/mediatailor/src/main/resources/codegen-resources/endpoint-rule-set.json index 03d6980c4d0..f6487bddd7f 100644 --- a/services/mediatailor/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/mediatailor/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - 
"conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS 
and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://api.mediatailor-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://api.mediatailor.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://api.mediatailor.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://api.mediatailor.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://api.mediatailor.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/mediatailor/src/main/resources/codegen-resources/service-2.json b/services/mediatailor/src/main/resources/codegen-resources/service-2.json index 9aa11e84a2c..f7fade28b13 100644 --- a/services/mediatailor/src/main/resources/codegen-resources/service-2.json +++ b/services/mediatailor/src/main/resources/codegen-resources/service-2.json @@ -536,7 +536,7 @@ "members":{ "AccessType":{ "shape":"AccessType", - "documentation":"

    The type of authentication used to access content from HttpConfiguration::BaseUrl on your source location. Accepted value: S3_SIGV4.

    S3_SIGV4 - AWS Signature Version 4 authentication for Amazon S3 hosted virtual-style access. If your source location base URL is an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the bucket where your source content is stored. Your MediaTailor source location baseURL must follow the S3 virtual hosted-style request URL format. For example, https://bucket-name.s3.Region.amazonaws.com/key-name.

    Before you can use S3_SIGV4, you must meet these requirements:

    • You must allow MediaTailor to access your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For information about configuring access in IAM, see Access management in the IAM User Guide.

    • The mediatailor.amazonaws.com service principal must have permissions to read all top level manifests referenced by the VodSource packaging configurations.

    • The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.

    " + "documentation":"

    The type of authentication used to access content from HttpConfiguration::BaseUrl on your source location.

    S3_SIGV4 - AWS Signature Version 4 authentication for Amazon S3 hosted virtual-style access. If your source location base URL is an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the bucket where your source content is stored. Your MediaTailor source location baseURL must follow the S3 virtual hosted-style request URL format. For example, https://bucket-name.s3.Region.amazonaws.com/key-name.

    Before you can use S3_SIGV4, you must meet these requirements:

    • You must allow MediaTailor to access your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For information about configuring access in IAM, see Access management in the IAM User Guide.

    • The mediatailor.amazonaws.com service principal must have permissions to read all top level manifests referenced by the VodSource packaging configurations.

    • The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.

    AUTODETECT_SIGV4 - AWS Signature Version 4 authentication for a set of supported services: MediaPackage Version 2 and Amazon S3 hosted virtual-style access. If your source location base URL is a MediaPackage Version 2 endpoint or an Amazon S3 bucket, MediaTailor can use AWS Signature Version 4 (SigV4) authentication to access the resource where your source content is stored.

    Before you can use AUTODETECT_SIGV4 with a MediaPackage Version 2 endpoint, you must meet these requirements:

    • You must grant MediaTailor access to your MediaPackage endpoint by granting mediatailor.amazonaws.com principal access in an Origin Access policy on the endpoint.

    • Your MediaTailor source location base URL must be a MediaPackage V2 endpoint.

    • The caller of the API must have mediapackagev2:GetObject IAM permissions to read all top level manifests referenced by the MediaTailor source packaging configurations.

    Before you can use AUTODETECT_SIGV4 with an Amazon S3 bucket, you must meet these requirements:

    • You must grant MediaTailor access to your S3 bucket by granting mediatailor.amazonaws.com principal access in IAM. For more information about configuring access in IAM, see Access management in the IAM User Guide.

    • The mediatailor.amazonaws.com service principal must have permissions to read all top-level manifests referenced by the VodSource packaging configurations.

    • The caller of the API must have s3:GetObject IAM permissions to read all top level manifests referenced by your MediaTailor VodSource packaging configurations.

    " }, "SecretsManagerAccessTokenConfiguration":{ "shape":"SecretsManagerAccessTokenConfiguration", @@ -549,7 +549,8 @@ "type":"string", "enum":[ "S3_SIGV4", - "SECRETS_MANAGER_ACCESS_TOKEN" + "SECRETS_MANAGER_ACCESS_TOKEN", + "AUTODETECT_SIGV4" ] }, "AdBreak":{ diff --git a/services/medicalimaging/pom.xml b/services/medicalimaging/pom.xml index 5ebe30ed9f6..ecc27611411 100644 --- a/services/medicalimaging/pom.xml +++ b/services/medicalimaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT medicalimaging AWS Java SDK :: Services :: Medical Imaging diff --git a/services/memorydb/pom.xml b/services/memorydb/pom.xml index 509d976f4b5..b379790d634 100644 --- a/services/memorydb/pom.xml +++ b/services/memorydb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT memorydb AWS Java SDK :: Services :: Memory DB diff --git a/services/mgn/pom.xml b/services/mgn/pom.xml index 829c6e76179..0e095ec69ef 100644 --- a/services/mgn/pom.xml +++ b/services/mgn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT mgn AWS Java SDK :: Services :: Mgn diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index 07917103ac0..9f4b5d16d6a 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhub/src/main/resources/codegen-resources/customization.config b/services/migrationhub/src/main/resources/codegen-resources/customization.config index 6abaf75b14b..e5d2b586984 100644 --- a/services/migrationhub/src/main/resources/codegen-resources/customization.config +++ b/services/migrationhub/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,3 @@ { - "blacklistedSimpleMethods" : ["*"] + "excludedSimpleMethods" : ["*"] } diff --git 
a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index bf216186376..55541c2f7c6 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/migrationhuborchestrator/pom.xml b/services/migrationhuborchestrator/pom.xml index 472aeedcb6a..b266c97b353 100644 --- a/services/migrationhuborchestrator/pom.xml +++ b/services/migrationhuborchestrator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT migrationhuborchestrator AWS Java SDK :: Services :: Migration Hub Orchestrator diff --git a/services/migrationhubrefactorspaces/pom.xml b/services/migrationhubrefactorspaces/pom.xml index a08bb023952..c3ce1207668 100644 --- a/services/migrationhubrefactorspaces/pom.xml +++ b/services/migrationhubrefactorspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT migrationhubrefactorspaces AWS Java SDK :: Services :: Migration Hub Refactor Spaces diff --git a/services/migrationhubstrategy/pom.xml b/services/migrationhubstrategy/pom.xml index bab3418a884..3b4460c316e 100644 --- a/services/migrationhubstrategy/pom.xml +++ b/services/migrationhubstrategy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT migrationhubstrategy AWS Java SDK :: Services :: Migration Hub Strategy diff --git a/services/mobile/pom.xml b/services/mobile/pom.xml index b6c4e7f6612..1bf5e9a8c0a 100644 --- a/services/mobile/pom.xml +++ b/services/mobile/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 mobile diff --git a/services/mobile/src/main/resources/codegen-resources/customization.config b/services/mobile/src/main/resources/codegen-resources/customization.config index 
953b9278a88..27d3c7e09b7 100644 --- a/services/mobile/src/main/resources/codegen-resources/customization.config +++ b/services/mobile/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "listBundles", "listProjects" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createProject" ] } diff --git a/services/mq/pom.xml b/services/mq/pom.xml index 606a519775d..34e77705267 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 mq diff --git a/services/mq/src/main/resources/codegen-resources/customization.config b/services/mq/src/main/resources/codegen-resources/customization.config index a0dbd42cc12..fd2da26cf90 100644 --- a/services/mq/src/main/resources/codegen-resources/customization.config +++ b/services/mq/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "listBrokers", "listConfigurations" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createBroker", "createConfiguration" ] diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index 2608820fd5e..3381d8d823f 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mturk/src/main/resources/codegen-resources/customization.config b/services/mturk/src/main/resources/codegen-resources/customization.config index 1827bbb57cb..8d6cca6e6d5 100644 --- a/services/mturk/src/main/resources/codegen-resources/customization.config +++ b/services/mturk/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : ["listBonusPayments"], + "excludedSimpleMethods" : ["listBonusPayments"], "verifiedSimpleMethods" : [ "listWorkerBlocks", "listReviewableHITs", diff --git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index 
67b54ab349a..e2d570fb3fc 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 2f77db6b822..d7a6ec4b81f 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index e4f0b6981b0..d1989bf9ae6 100644 --- a/services/networkfirewall/pom.xml +++ b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index 9664a137455..9b07e7a80da 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/nimble/pom.xml b/services/nimble/pom.xml index b94f402c267..9541a903c0c 100644 --- a/services/nimble/pom.xml +++ b/services/nimble/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT nimble AWS Java SDK :: Services :: Nimble diff --git a/services/oam/pom.xml b/services/oam/pom.xml index b596e7b8b4b..ae637e594da 100644 --- a/services/oam/pom.xml +++ b/services/oam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT oam AWS Java SDK :: Services :: OAM diff --git a/services/omics/pom.xml b/services/omics/pom.xml index 988db696927..0caf9ca4b6d 100644 --- a/services/omics/pom.xml +++ b/services/omics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT 
+ 2.20.138-SNAPSHOT omics AWS Java SDK :: Services :: Omics diff --git a/services/omics/src/main/resources/codegen-resources/paginators-1.json b/services/omics/src/main/resources/codegen-resources/paginators-1.json index 596e4bd7c24..0254c8f78fa 100644 --- a/services/omics/src/main/resources/codegen-resources/paginators-1.json +++ b/services/omics/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,12 @@ "limit_key": "maxResults", "result_key": "annotationImportJobs" }, + "ListAnnotationStoreVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "annotationStoreVersions" + }, "ListAnnotationStores": { "input_token": "nextToken", "output_token": "nextToken", @@ -90,6 +96,12 @@ "limit_key": "maxResults", "result_key": "sequenceStores" }, + "ListShares": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "shares" + }, "ListVariantImportJobs": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/omics/src/main/resources/codegen-resources/service-2.json b/services/omics/src/main/resources/codegen-resources/service-2.json index 1d25521ae1b..ce387de2ec3 100644 --- a/services/omics/src/main/resources/codegen-resources/service-2.json +++ b/services/omics/src/main/resources/codegen-resources/service-2.json @@ -35,6 +35,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"control-storage-"} }, + "AcceptShare":{ + "name":"AcceptShare", + "http":{ + "method":"POST", + "requestUri":"/share/{shareId}", + "responseCode":200 + }, + "input":{"shape":"AcceptShareRequest"}, + "output":{"shape":"AcceptShareResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Accepts a share for an analytics store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "BatchDeleteReadSet":{ "name":"BatchDeleteReadSet", "http":{ @@ -166,6 +188,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"analytics-"} }, + "CreateAnnotationStoreVersion":{ + "name":"CreateAnnotationStoreVersion", + "http":{ + "method":"POST", + "requestUri":"/annotationStore/{name}/version", + "responseCode":200 + }, + "input":{"shape":"CreateAnnotationStoreVersionRequest"}, + "output":{"shape":"CreateAnnotationStoreVersionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a new version of an annotation store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "CreateMultipartReadSetUpload":{ "name":"CreateMultipartReadSetUpload", "http":{ @@ -254,6 +298,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"control-storage-"} }, + "CreateShare":{ + "name":"CreateShare", + "http":{ + "method":"POST", + "requestUri":"/share", + "responseCode":200 + }, + "input":{"shape":"CreateShareRequest"}, + "output":{"shape":"CreateShareResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Creates a share offer that can be accepted outside the account by a subscriber. The share is created by the owner and accepted by the principal subscriber.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "CreateVariantStore":{ "name":"CreateVariantStore", "http":{ @@ -321,6 +387,28 @@ "endpoint":{"hostPrefix":"analytics-"}, "idempotent":true }, + "DeleteAnnotationStoreVersions":{ + "name":"DeleteAnnotationStoreVersions", + "http":{ + "method":"POST", + "requestUri":"/annotationStore/{name}/versions/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteAnnotationStoreVersionsRequest"}, + "output":{"shape":"DeleteAnnotationStoreVersionsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes one or multiple versions of an annotation store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"}, + "idempotent":true + }, "DeleteReference":{ "name":"DeleteReference", "http":{ @@ -436,6 +524,29 @@ "endpoint":{"hostPrefix":"control-storage-"}, "idempotent":true }, + "DeleteShare":{ + "name":"DeleteShare", + "http":{ + "method":"DELETE", + "requestUri":"/share/{shareId}", + "responseCode":200 + }, + "input":{"shape":"DeleteShareRequest"}, + "output":{"shape":"DeleteShareResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Deletes a share of an analytics store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"}, + "idempotent":true + }, "DeleteVariantStore":{ "name":"DeleteVariantStore", "http":{ @@ -521,6 +632,26 @@ "authtype":"v4", "endpoint":{"hostPrefix":"analytics-"} }, + "GetAnnotationStoreVersion":{ + "name":"GetAnnotationStoreVersion", + "http":{ + "method":"GET", + "requestUri":"/annotationStore/{name}/version/{versionName}", + "responseCode":200 + }, + "input":{"shape":"GetAnnotationStoreVersionRequest"}, + "output":{"shape":"GetAnnotationStoreVersionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves the metadata for an annotation store version.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "GetReadSet":{ "name":"GetReadSet", "http":{ @@ -803,6 +934,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"control-storage-"} }, + "GetShare":{ + "name":"GetShare", + "http":{ + "method":"GET", + "requestUri":"/share/{shareId}", + "responseCode":200 + }, + "input":{"shape":"GetShareRequest"}, + "output":{"shape":"GetShareResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves the metadata for a share.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "GetVariantImportJob":{ "name":"GetVariantImportJob", "http":{ @@ -886,6 +1039,26 @@ "authtype":"v4", "endpoint":{"hostPrefix":"analytics-"} }, + "ListAnnotationStoreVersions":{ + "name":"ListAnnotationStoreVersions", + "http":{ + "method":"POST", + "requestUri":"/annotationStore/{name}/versions", + "responseCode":200 + }, + "input":{"shape":"ListAnnotationStoreVersionsRequest"}, + "output":{"shape":"ListAnnotationStoreVersionsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists the versions of an annotation store.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "ListAnnotationStores":{ "name":"ListAnnotationStores", "http":{ @@ -1187,6 +1360,28 @@ "authtype":"v4", "endpoint":{"hostPrefix":"control-storage-"} }, + "ListShares":{ + "name":"ListShares", + "http":{ + "method":"POST", + "requestUri":"/shares", + "responseCode":200 + }, + "input":{"shape":"ListSharesRequest"}, + "output":{"shape":"ListSharesResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists all shares associated with an account.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -1401,7 +1596,7 @@ {"shape":"AccessDeniedException"}, {"shape":"RequestTimeoutException"} ], - "documentation":"

    Starts a run.

    ", + "documentation":"

    Starts a workflow run. To duplicate a run, specify the run's ID and a role ARN. The remaining parameters are copied from the previous run.

    The total number of runs in your account is subject to a quota per Region. To avoid needing to delete runs manually, you can set the retention mode to REMOVE. Runs with this setting are deleted automatically when the run quota is exceeded.

    ", "authtype":"v4", "endpoint":{"hostPrefix":"workflows-"} }, @@ -1493,6 +1688,26 @@ "authtype":"v4", "endpoint":{"hostPrefix":"analytics-"} }, + "UpdateAnnotationStoreVersion":{ + "name":"UpdateAnnotationStoreVersion", + "http":{ + "method":"POST", + "requestUri":"/annotationStore/{name}/version/{versionName}", + "responseCode":200 + }, + "input":{"shape":"UpdateAnnotationStoreVersionRequest"}, + "output":{"shape":"UpdateAnnotationStoreVersionResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Updates the description of an annotation store version.

    ", + "authtype":"v4", + "endpoint":{"hostPrefix":"analytics-"} + }, "UpdateRunGroup":{ "name":"UpdateRunGroup", "http":{ @@ -1614,6 +1829,27 @@ "max":64, "min":1 }, + "AcceptShareRequest":{ + "type":"structure", + "required":["shareId"], + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    The ID for a share offer for analytics store data.

    ", + "location":"uri", + "locationName":"shareId" + } + } + }, + "AcceptShareResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ShareStatus", + "documentation":"

    The status of an analytics store share.

    " + } + } + }, "AccessDeniedException":{ "type":"structure", "required":["message"], @@ -1773,6 +2009,7 @@ "required":[ "id", "destinationName", + "versionName", "roleArn", "status", "creationTime", @@ -1787,6 +2024,10 @@ "shape":"String", "documentation":"

    The job's destination annotation store.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of the annotation store version.

    " + }, "roleArn":{ "shape":"Arn", "documentation":"

    The job's service role ARN.

    " @@ -1864,7 +2105,7 @@ "documentation":"

    The store's file format.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "sseConfig":{ @@ -1894,6 +2135,73 @@ "type":"list", "member":{"shape":"AnnotationStoreItem"} }, + "AnnotationStoreVersionItem":{ + "type":"structure", + "required":[ + "storeId", + "id", + "status", + "versionArn", + "name", + "versionName", + "description", + "creationTime", + "updateTime", + "statusMessage", + "versionSizeBytes" + ], + "members":{ + "storeId":{ + "shape":"ResourceId", + "documentation":"

    The store ID for an annotation store version.

    " + }, + "id":{ + "shape":"ResourceId", + "documentation":"

    The annotation store version ID.

    " + }, + "status":{ + "shape":"VersionStatus", + "documentation":"

    The status of an annotation store version.

    " + }, + "versionArn":{ + "shape":"Arn", + "documentation":"

    The Arn for an annotation store version.

    " + }, + "name":{ + "shape":"StoreName", + "documentation":"

    A name given to an annotation store version to distinguish it from others.

    " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of an annotation store version.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of an annotation store version.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The time stamp for when an annotation store version was created.

    " + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

    The time stamp for when an annotation store version was updated.

    " + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

    The status of an annotation store version.

    " + }, + "versionSizeBytes":{ + "shape":"Long", + "documentation":"

    The size of an annotation store version in Bytes.

    " + } + }, + "documentation":"

    Annotation store versions.

    " + }, + "AnnotationStoreVersionItems":{ + "type":"list", + "member":{"shape":"AnnotationStoreVersionItem"} + }, "AnnotationType":{ "type":"string", "enum":[ @@ -1912,6 +2220,12 @@ "min":20, "pattern":"arn:([^: ]*):([^: ]*):([^: ]*):([0-9]{12}):([^: ]*)" }, + "ArnList":{ + "type":"list", + "member":{"shape":"String"}, + "max":10, + "min":1 + }, "BatchDeleteReadSetRequest":{ "type":"structure", "required":[ @@ -2097,17 +2411,21 @@ "documentation":"

    The genome reference for the store's annotations.

    " }, "name":{ - "shape":"CreateAnnotationStoreRequestNameString", + "shape":"StoreName", "documentation":"

    A name for the store.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    A description for the store.

    " }, "tags":{ "shape":"TagMap", "documentation":"

    Tags for the store.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, "sseConfig":{ "shape":"SseConfig", "documentation":"

    Server-side encryption (SSE) settings for the store.

    " @@ -2122,16 +2440,13 @@ } } }, - "CreateAnnotationStoreRequestNameString":{ - "type":"string", - "pattern":"([a-z]){1}([a-z0-9_]){2,254}" - }, "CreateAnnotationStoreResponse":{ "type":"structure", "required":[ "id", "status", "name", + "versionName", "creationTime" ], "members":{ @@ -2159,12 +2474,88 @@ "shape":"String", "documentation":"

    The store's name.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, "creationTime":{ "shape":"CreationTime", "documentation":"

    When the store was created.

    " } } }, + "CreateAnnotationStoreVersionRequest":{ + "type":"structure", + "required":[ + "name", + "versionName" + ], + "members":{ + "name":{ + "shape":"StoreName", + "documentation":"

    The name of an annotation store from which versions are being created.

    ", + "location":"uri", + "locationName":"name" + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of an annotation store version.

    " + }, + "versionOptions":{ + "shape":"VersionOptions", + "documentation":"

    The options for an annotation store version.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    Any tags added to annotation store version.

    " + } + } + }, + "CreateAnnotationStoreVersionResponse":{ + "type":"structure", + "required":[ + "id", + "versionName", + "storeId", + "name", + "status", + "creationTime" + ], + "members":{ + "id":{ + "shape":"ResourceId", + "documentation":"

    A generated ID for the annotation store

    " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, + "storeId":{ + "shape":"ResourceId", + "documentation":"

    The ID for the annotation store from which new versions are being created.

    " + }, + "versionOptions":{ + "shape":"VersionOptions", + "documentation":"

    The options for an annotation store version.

    " + }, + "name":{ + "shape":"StoreName", + "documentation":"

    The name given to an annotation store version to distinguish it from other versions.

    " + }, + "status":{ + "shape":"VersionStatus", + "documentation":"

    The status of an annotation store version.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The time stamp for the creation of an annotation store version.

    " + } + } + }, "CreateMultipartReadSetUploadRequest":{ "type":"structure", "required":[ @@ -2482,6 +2873,44 @@ } } }, + "CreateShareRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "principalSubscriber" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    The resource ARN for the analytics store to be shared.

    " + }, + "principalSubscriber":{ + "shape":"String", + "documentation":"

    The principal subscriber is the account being given access to the analytics store data through the share offer.

    " + }, + "shareName":{ + "shape":"ShareName", + "documentation":"

    A name given to the share.

    " + } + } + }, + "CreateShareResponse":{ + "type":"structure", + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    An ID generated for the share.

    " + }, + "status":{ + "shape":"ShareStatus", + "documentation":"

    The status of a share.

    " + }, + "shareName":{ + "shape":"ShareName", + "documentation":"

    A name given to the share.

    " + } + } + }, "CreateVariantStoreRequest":{ "type":"structure", "required":["reference"], @@ -2491,11 +2920,11 @@ "documentation":"

    The genome reference for the store's variants.

    " }, "name":{ - "shape":"CreateVariantStoreRequestNameString", + "shape":"StoreName", "documentation":"

    A name for the store.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    A description for the store.

    " }, "tags":{ @@ -2508,10 +2937,6 @@ } } }, - "CreateVariantStoreRequestNameString":{ - "type":"string", - "pattern":"([a-z]){1}([a-z0-9_]){2,254}" - }, "CreateVariantStoreResponse":{ "type":"structure", "required":[ @@ -2660,6 +3085,40 @@ } } }, + "DeleteAnnotationStoreVersionsRequest":{ + "type":"structure", + "required":[ + "name", + "versions" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of the annotation store from which versions are being deleted.

    ", + "location":"uri", + "locationName":"name" + }, + "versions":{ + "shape":"VersionList", + "documentation":"

    The versions of an annotation store to be deleted.

    " + }, + "force":{ + "shape":"PrimitiveBoolean", + "documentation":"

    Forces the deletion of an annotation store version when imports are in-progress.

    ", + "location":"querystring", + "locationName":"force" + } + } + }, + "DeleteAnnotationStoreVersionsResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"VersionDeleteErrorList", + "documentation":"

    Any errors that occur when attempting to delete an annotation store version.

    " + } + } + }, "DeleteReferenceRequest":{ "type":"structure", "required":[ @@ -2744,6 +3203,27 @@ "members":{ } }, + "DeleteShareRequest":{ + "type":"structure", + "required":["shareId"], + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    The ID for the share request to be deleted.

    ", + "location":"uri", + "locationName":"shareId" + } + } + }, + "DeleteShareResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ShareStatus", + "documentation":"

    The status of the share being deleted.

    " + } + } + }, "DeleteVariantStoreRequest":{ "type":"structure", "required":["name"], @@ -2784,6 +3264,11 @@ } } }, + "Description":{ + "type":"string", + "max":500, + "min":0 + }, "Encoding":{ "type":"string", "max":20, @@ -2945,6 +3430,20 @@ "CRAM" ] }, + "Filter":{ + "type":"structure", + "members":{ + "resourceArns":{ + "shape":"ArnList", + "documentation":"

    The Amazon Resource Number (Arn) for an analytics store.

    " + }, + "status":{ + "shape":"StatusList", + "documentation":"

    The status of an annotation store version.

    " + } + }, + "documentation":"

    Use filters to focus the returned annotation store versions on a specific parameter, such as the status of the annotation store.

    " + }, "FormatOptions":{ "type":"structure", "members":{ @@ -3004,6 +3503,7 @@ "required":[ "id", "destinationName", + "versionName", "roleArn", "status", "statusMessage", @@ -3023,6 +3523,10 @@ "shape":"StoreName", "documentation":"

    The job's destination annotation store.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of the annotation store version.

    " + }, "roleArn":{ "shape":"Arn", "documentation":"

    The job's service role ARN.

    " @@ -3088,7 +3592,8 @@ "updateTime", "tags", "statusMessage", - "storeSizeBytes" + "storeSizeBytes", + "numVersions" ], "members":{ "id":{ @@ -3112,7 +3617,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "sseConfig":{ @@ -3135,17 +3640,113 @@ "shape":"StoreOptions", "documentation":"

    The store's parsing options.

    " }, - "storeFormat":{ - "shape":"StoreFormat", - "documentation":"

    The store's annotation file format.

    " + "storeFormat":{ + "shape":"StoreFormat", + "documentation":"

    The store's annotation file format.

    " + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

    A status message.

    " + }, + "storeSizeBytes":{ + "shape":"Long", + "documentation":"

    The store's size in bytes.

    " + }, + "numVersions":{ + "shape":"Integer", + "documentation":"

    An integer indicating how many versions of an annotation store exist.

    " + } + } + }, + "GetAnnotationStoreVersionRequest":{ + "type":"structure", + "required":[ + "name", + "versionName" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name given to an annotation store version to distinguish it from others.

    ", + "location":"uri", + "locationName":"name" + }, + "versionName":{ + "shape":"String", + "documentation":"

    The name given to an annotation store version to distinguish it from others.

    ", + "location":"uri", + "locationName":"versionName" + } + } + }, + "GetAnnotationStoreVersionResponse":{ + "type":"structure", + "required":[ + "storeId", + "id", + "status", + "versionArn", + "name", + "versionName", + "description", + "creationTime", + "updateTime", + "tags", + "statusMessage", + "versionSizeBytes" + ], + "members":{ + "storeId":{ + "shape":"ResourceId", + "documentation":"

    The store ID for an annotation store version.

    " + }, + "id":{ + "shape":"ResourceId", + "documentation":"

    The annotation store version ID.

    " + }, + "status":{ + "shape":"VersionStatus", + "documentation":"

    The status of an annotation store version.

    " + }, + "versionArn":{ + "shape":"Arn", + "documentation":"

    The Arn for the annotation store.

    " + }, + "name":{ + "shape":"StoreName", + "documentation":"

    The name of the annotation store.

    " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version to distinguish it from others.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description for an annotation store version.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The time stamp for when an annotation store version was created.

    " + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

    The time stamp for when an annotation store version was updated.

    " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

    Any tags associated with an annotation store version.

    " + }, + "versionOptions":{ + "shape":"VersionOptions", + "documentation":"

    The options for an annotation store version.

    " }, "statusMessage":{ "shape":"StatusMessage", - "documentation":"

    A status message.

    " + "documentation":"

    The status of an annotation store version.

    " }, - "storeSizeBytes":{ + "versionSizeBytes":{ "shape":"Long", - "documentation":"

    The store's size in bytes.

    " + "documentation":"

    The size of the annotation store version in Bytes.

    " } } }, @@ -3919,6 +4520,10 @@ "accelerators":{ "shape":"Accelerators", "documentation":"

    The computational accelerator used to run the workflow.

    " + }, + "retentionMode":{ + "shape":"RunRetentionMode", + "documentation":"

    The run's retention mode.

    " } } }, @@ -4073,6 +4678,27 @@ } } }, + "GetShareRequest":{ + "type":"structure", + "required":["shareId"], + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    The generated ID for a share.

    ", + "location":"uri", + "locationName":"shareId" + } + } + }, + "GetShareResponse":{ + "type":"structure", + "members":{ + "share":{ + "shape":"ShareDetails", + "documentation":"

    An analytics store share details object. It contains the status, resourceArn, ownerId, and other details of the share.

    " + } + } + }, "GetVariantImportRequest":{ "type":"structure", "required":["jobId"], @@ -4195,7 +4821,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "sseConfig":{ @@ -4548,6 +5174,10 @@ "type":"list", "member":{"shape":"ImportReferenceSourceItem"} }, + "Integer":{ + "type":"integer", + "box":true + }, "InternalServerException":{ "type":"structure", "required":["message"], @@ -4652,6 +5282,68 @@ } } }, + "ListAnnotationStoreVersionsFilter":{ + "type":"structure", + "members":{ + "status":{ + "shape":"VersionStatus", + "documentation":"

    The status of an annotation store version.

    " + } + }, + "documentation":"

    Use filters to focus the returned annotation store versions on a specific parameter, such as the status of the annotation store.

    " + }, + "ListAnnotationStoreVersionsRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of an annotation store.

    ", + "location":"uri", + "locationName":"name" + }, + "maxResults":{ + "shape":"ListAnnotationStoreVersionsRequestMaxResultsInteger", + "documentation":"

    The maximum number of annotation store versions to return in one page of results.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"ListAnnotationStoreVersionsRequestNextTokenString", + "documentation":"

    Specifies the pagination token from a previous request to retrieve the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "filter":{ + "shape":"ListAnnotationStoreVersionsFilter", + "documentation":"

    A filter to apply to the list of annotation store versions.

    " + } + } + }, + "ListAnnotationStoreVersionsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListAnnotationStoreVersionsRequestNextTokenString":{ + "type":"string", + "max":10000, + "min":1 + }, + "ListAnnotationStoreVersionsResponse":{ + "type":"structure", + "members":{ + "annotationStoreVersions":{ + "shape":"AnnotationStoreVersionItems", + "documentation":"

    Lists all versions of an annotation store.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    Specifies the pagination token from a previous request to retrieve the next page of results.

    " + } + } + }, "ListAnnotationStoresFilter":{ "type":"structure", "members":{ @@ -5332,6 +6024,46 @@ } } }, + "ListSharesRequest":{ + "type":"structure", + "required":["resourceOwner"], + "members":{ + "resourceOwner":{ + "shape":"ResourceOwner", + "documentation":"

    The account that owns the analytics store shared.

    " + }, + "filter":{ + "shape":"Filter", + "documentation":"

    Attributes used to filter for a specific subset of shares.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    Next token returned in the response of a previous ListShares call. Used to get the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

    The maximum number of shares to return in one page of results.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListSharesResponse":{ + "type":"structure", + "required":["shares"], + "members":{ + "shares":{ + "shape":"ShareDetailsList", + "documentation":"

    The shares available and their meta details.

    " + }, + "nextToken":{ + "shape":"String", + "documentation":"

    Next token returned in the response of a previous ListSharesResponse call. Used to get the next page of results.

    " + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -6338,6 +7070,13 @@ }, "exception":true }, + "ResourceOwner":{ + "type":"string", + "enum":[ + "SELF", + "OTHER" + ] + }, "RoleArn":{ "type":"string", "max":2048, @@ -6589,6 +7328,15 @@ "key":{"shape":"RunResourceDigestKey"}, "value":{"shape":"RunResourceDigest"} }, + "RunRetentionMode":{ + "type":"string", + "enum":[ + "RETAIN", + "REMOVE" + ], + "max":64, + "min":1 + }, "RunRoleArn":{ "type":"string", "max":128, @@ -6784,6 +7532,69 @@ }, "exception":true }, + "ShareDetails":{ + "type":"structure", + "members":{ + "shareId":{ + "shape":"String", + "documentation":"

    The ID for a share offer for an analytics store.

    " + }, + "resourceArn":{ + "shape":"String", + "documentation":"

    The resource Arn of the analytics store being shared.

    " + }, + "principalSubscriber":{ + "shape":"String", + "documentation":"

    The principal subscriber is the account the analytics store data is being shared with.

    " + }, + "ownerId":{ + "shape":"String", + "documentation":"

    The account ID for the data owner. The owner creates the share offer.

    " + }, + "status":{ + "shape":"ShareStatus", + "documentation":"

    The status of a share.

    " + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

    The status message for a share. It provides more details on the status of the share.

    " + }, + "shareName":{ + "shape":"ShareName", + "documentation":"

    The name of the share.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The timestamp for when the share was created.

    " + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

    The timestamp of the share update.

    " + } + }, + "documentation":"

    The details of a share.

    " + }, + "ShareDetailsList":{ + "type":"list", + "member":{"shape":"ShareDetails"} + }, + "ShareName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ShareStatus":{ + "type":"string", + "enum":[ + "PENDING", + "ACTIVATING", + "ACTIVE", + "DELETING", + "DELETED", + "FAILED" + ] + }, "SourceFiles":{ "type":"structure", "required":["source1"], @@ -6840,6 +7651,10 @@ "shape":"AnnotationImportItemSources", "documentation":"

    Items to import.

    " }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of the annotation store version.

    " + }, "formatOptions":{ "shape":"FormatOptions", "documentation":"

    Formatting options for the annotation file.

    " @@ -7222,11 +8037,11 @@ }, "workflowType":{ "shape":"WorkflowType", - "documentation":"

    The run's workflows type.

    " + "documentation":"

    The run's workflow type.

    " }, "runId":{ "shape":"RunId", - "documentation":"

    The run's ID.

    " + "documentation":"

    The ID of a run to duplicate.

    " }, "roleArn":{ "shape":"RunRoleArn", @@ -7268,6 +8083,10 @@ "shape":"RunRequestId", "documentation":"

    To ensure that requests don't run multiple times, specify a unique ID for each request.

    ", "idempotencyToken":true + }, + "retentionMode":{ + "shape":"RunRetentionMode", + "documentation":"

    The retention mode for the run.

    " } } }, @@ -7344,16 +8163,15 @@ } } }, + "StatusList":{ + "type":"list", + "member":{"shape":"ShareStatus"} + }, "StatusMessage":{ "type":"string", "max":1000, "min":0 }, - "StoreDescription":{ - "type":"string", - "max":500, - "min":0 - }, "StoreFormat":{ "type":"string", "enum":[ @@ -7618,6 +8436,30 @@ "max":5000, "min":1 }, + "TsvVersionOptions":{ + "type":"structure", + "members":{ + "annotationType":{ + "shape":"AnnotationType", + "documentation":"

    The store version's annotation type.

    " + }, + "formatToHeader":{ + "shape":"FormatToHeader", + "documentation":"

    The annotation store version's header key to column name mapping.

    " + }, + "schema":{ + "shape":"TsvVersionOptionsSchemaList", + "documentation":"

    The TSV schema for an annotation store version.

    " + } + }, + "documentation":"

    The options for a TSV file.

    " + }, + "TsvVersionOptionsSchemaList":{ + "type":"list", + "member":{"shape":"SchemaItem"}, + "max":5000, + "min":1 + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -7655,7 +8497,7 @@ "locationName":"name" }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    A description for the store.

    " } } @@ -7689,7 +8531,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "creationTime":{ @@ -7710,6 +8552,78 @@ } } }, + "UpdateAnnotationStoreVersionRequest":{ + "type":"structure", + "required":[ + "name", + "versionName" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of an annotation store.

    ", + "location":"uri", + "locationName":"name" + }, + "versionName":{ + "shape":"String", + "documentation":"

    The name of an annotation store version.

    ", + "location":"uri", + "locationName":"versionName" + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of an annotation store.

    " + } + } + }, + "UpdateAnnotationStoreVersionResponse":{ + "type":"structure", + "required":[ + "storeId", + "id", + "status", + "name", + "versionName", + "description", + "creationTime", + "updateTime" + ], + "members":{ + "storeId":{ + "shape":"ResourceId", + "documentation":"

    The annotation store ID.

    " + }, + "id":{ + "shape":"ResourceId", + "documentation":"

    The annotation store version ID.

    " + }, + "status":{ + "shape":"VersionStatus", + "documentation":"

    The status of an annotation store version.

    " + }, + "name":{ + "shape":"StoreName", + "documentation":"

    The name of an annotation store.

    " + }, + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name of an annotation store version.

    " + }, + "description":{ + "shape":"Description", + "documentation":"

    The description of an annotation store version.

    " + }, + "creationTime":{ + "shape":"CreationTime", + "documentation":"

    The time stamp for when an annotation store version was created.

    " + }, + "updateTime":{ + "shape":"UpdateTime", + "documentation":"

    The time stamp for when an annotation store version was updated.

    " + } + } + }, "UpdateRunGroupRequest":{ "type":"structure", "required":["id"], @@ -7781,7 +8695,7 @@ "locationName":"name" }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    A description for the store.

    " } } @@ -7815,7 +8729,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "creationTime":{ @@ -8058,7 +8972,7 @@ "documentation":"

    The store's name.

    " }, "description":{ - "shape":"StoreDescription", + "shape":"Description", "documentation":"

    The store's description.

    " }, "sseConfig":{ @@ -8102,6 +9016,61 @@ }, "documentation":"

    Formatting options for a VCF file.

    " }, + "VersionDeleteError":{ + "type":"structure", + "required":[ + "versionName", + "message" + ], + "members":{ + "versionName":{ + "shape":"VersionName", + "documentation":"

    The name given to an annotation store version.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    The message explaining the error in annotation store deletion.

    " + } + }, + "documentation":"

    The error preventing deletion of the annotation store version.

    " + }, + "VersionDeleteErrorList":{ + "type":"list", + "member":{"shape":"VersionDeleteError"} + }, + "VersionList":{ + "type":"list", + "member":{"shape":"VersionName"}, + "max":10, + "min":1 + }, + "VersionName":{ + "type":"string", + "max":255, + "min":3, + "pattern":"([a-z]){1}([a-z0-9_]){2,254}" + }, + "VersionOptions":{ + "type":"structure", + "members":{ + "tsvVersionOptions":{ + "shape":"TsvVersionOptions", + "documentation":"

    File settings for a version of a TSV store.

    " + } + }, + "documentation":"

    The options for an annotation store version.

    ", + "union":true + }, + "VersionStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE", + "FAILED" + ] + }, "WorkflowArn":{ "type":"string", "max":128, diff --git a/services/omics/src/main/resources/codegen-resources/waiters-2.json b/services/omics/src/main/resources/codegen-resources/waiters-2.json index db1de32eedd..9e82e101dcb 100644 --- a/services/omics/src/main/resources/codegen-resources/waiters-2.json +++ b/services/omics/src/main/resources/codegen-resources/waiters-2.json @@ -81,6 +81,54 @@ "expected" : "DELETING" } ] }, + "AnnotationStoreVersionCreated" : { + "description" : "Wait until an annotation store version is created", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetAnnotationStoreVersion", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "ACTIVE" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "CREATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "UPDATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "FAILED" + } ] + }, + "AnnotationStoreVersionDeleted" : { + "description" : "Wait until an annotation store version is deleted.", + "delay" : 30, + "maxAttempts" : 20, + "operation" : "GetAnnotationStoreVersion", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "DELETED" + }, { + "matcher" : "error", + "state" : "success", + "expected" : "ResourceNotFoundException" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "DELETING" + } ] + }, "ReadSetActivationJobCompleted" : { "description" : "Wait until a job is completed.", "delay" : 30, diff --git a/services/opensearch/pom.xml b/services/opensearch/pom.xml index b1436890b8b..8931287ba8a 100644 --- a/services/opensearch/pom.xml +++ b/services/opensearch/pom.xml @@ -21,7 
+21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT opensearch AWS Java SDK :: Services :: Open Search diff --git a/services/opensearchserverless/pom.xml b/services/opensearchserverless/pom.xml index 4151ba0b6d0..ce00fb11cec 100644 --- a/services/opensearchserverless/pom.xml +++ b/services/opensearchserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT opensearchserverless AWS Java SDK :: Services :: Open Search Serverless diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index 87bcec4cbbc..2273aef0dd7 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworks/src/main/resources/codegen-resources/customization.config b/services/opsworks/src/main/resources/codegen-resources/customization.config index 197b17d901a..322f97faeed 100644 --- a/services/opsworks/src/main/resources/codegen-resources/customization.config +++ b/services/opsworks/src/main/resources/codegen-resources/customization.config @@ -6,7 +6,7 @@ "describeStacks", "describeUserProfiles" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "updateMyUserProfile", "describeAgentVersions", "describeApps", diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index 081f6a9cee4..315d65ddf46 100644 --- a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 48ff8ed2127..1102d2aa359 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 
organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/organizations/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/organizations/src/main/resources/codegen-resources/endpoint-rule-set.json index b31c5354663..af8bfbf83c1 100644 --- a/services/organizations/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/organizations/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,1048 +115,455 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "stringEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "PartitionResult" }, - "aws" - ] - } - ], - "type": "tree", - "rules": [ - { 
- "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } + "name" ] }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { 
- "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "endpoint": { - "url": "https://organizations.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "us-east-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "PartitionResult" }, - "aws-cn" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": 
{ - "url": "https://organizations-fips.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } + "name" ] }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.amazonaws.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] + "ref": "UseFIPS" }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations.{Region}.api.amazonwebservices.com.cn", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations-fips.us-east-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "endpoint": { - "url": 
"https://organizations.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "us-east-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-us-gov" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] + "ref": "PartitionResult" }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } + "name" ] }, + "aws-cn" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - 
"type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] + "ref": "UseFIPS" }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations.{Region}.api.aws", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations.cn-northwest-1.amazonaws.com.cn", + "properties": { + "authSchemes": [ { - "conditions": [], - "endpoint": { - "url": "https://organizations.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "cn-northwest-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "ref": "UseFIPS" + "ref": "PartitionResult" }, - true + "name" ] }, + 
"aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations.us-gov-west-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "us-gov-west-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [ + "fn": "stringEquals", + "argv": [ { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "ref": "UseFIPS" + "ref": "PartitionResult" }, - true + "name" ] - } - ], - "type": "tree", - "rules": [ + }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, 
- "aws-global" - ] - } - ], - "endpoint": { - "url": "https://organizations-fips.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "aws-us-gov-global" - ] - } - ], - "endpoint": { - "url": "https://organizations.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://organizations-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, + false + ] + } + ], + "endpoint": { + "url": "https://organizations.us-gov-west-1.amazonaws.com", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "name": "sigv4", + "signingName": "organizations", + "signingRegion": "us-gov-west-1" } ] }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - 
"conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://organizations.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] }, { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "ref": "Region" + "ref": "PartitionResult" }, - "aws-global" + "supportsDualStack" ] } - ], + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], "endpoint": { - "url": "https://organizations.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-east-1" - } - ] - }, + "url": "https://organizations-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "ref": "Region" + "ref": "PartitionResult" }, - "aws-cn-global" + "supportsFIPS" ] } - ], + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], "endpoint": { - "url": "https://organizations.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "cn-northwest-1" - } - ] - }, + "url": 
"https://organizations-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, "headers": {} }, "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "stringEquals", + "fn": "getAttr", "argv": [ { - "ref": "Region" + "ref": "PartitionResult" }, - "aws-us-gov-global" + "supportsDualStack" ] } - ], - "endpoint": { - "url": "https://organizations.us-gov-west-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "organizations", - "signingRegion": "us-gov-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [], "endpoint": { - "url": "https://organizations.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://organizations.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://organizations.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/organizations/src/main/resources/codegen-resources/endpoint-tests.json b/services/organizations/src/main/resources/codegen-resources/endpoint-tests.json index 9f0b8e8ae1d..d84bb7c7e76 100644 --- 
a/services/organizations/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/organizations/src/main/resources/codegen-resources/endpoint-tests.json @@ -17,9 +17,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-global", "UseFIPS": false, - "Region": "aws-global" + "UseDualStack": false } }, { @@ -39,9 +39,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-global", "UseFIPS": true, - "Region": "aws-global" + "UseDualStack": false } }, { @@ -52,9 +52,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -74,9 +74,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -87,9 +87,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -109,9 +109,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -131,9 +131,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-cn-global", "UseFIPS": false, - "Region": "aws-cn-global" + "UseDualStack": false } }, { @@ -144,9 +144,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -157,9 +157,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -170,9 +170,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -192,9 +192,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -214,9 +214,9 @@ } }, "params": { - "UseDualStack": false, + "Region": 
"aws-us-gov-global", "UseFIPS": false, - "Region": "aws-us-gov-global" + "UseDualStack": false } }, { @@ -236,9 +236,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "aws-us-gov-global", "UseFIPS": true, - "Region": "aws-us-gov-global" + "UseDualStack": false } }, { @@ -249,9 +249,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -271,9 +271,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -284,9 +284,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -306,9 +306,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -319,9 +330,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -332,9 +354,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -345,9 +378,20 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -358,9 +402,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -371,9 +415,9 @@ } }, "params": { - "UseDualStack": false, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -385,8 +429,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -396,9 +440,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -408,11 +452,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/organizations/src/main/resources/codegen-resources/service-2.json b/services/organizations/src/main/resources/codegen-resources/service-2.json index 699b8c7b3a8..8cf73c8e9c1 100644 
--- a/services/organizations/src/main/resources/codegen-resources/service-2.json +++ b/services/organizations/src/main/resources/codegen-resources/service-2.json @@ -34,7 +34,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"AccessDeniedForDependencyException"} ], - "documentation":"

    Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

    You can only call this operation by the following principals when they also have the relevant IAM permissions:

    • Invitation to join or Approve all features request handshakes: only a principal from the member account.

      The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.

    • Enable all features final confirmation handshake: only a principal from the management account.

      For more information about invitations, see Inviting an Amazon Web Services account to join your organization in the Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling all features in your organization in the Organizations User Guide.

    After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that, it's deleted.

    " + "documentation":"

    Sends a response to the originator of a handshake agreeing to the action proposed by the handshake request.

    You can only call this operation by the following principals when they also have the relevant IAM permissions:

    • Invitation to join or Approve all features request handshakes: only a principal from the member account.

      The user who calls the API for an invitation to join must have the organizations:AcceptHandshake permission. If you enabled all features in the organization, the user must also have the iam:CreateServiceLinkedRole permission so that Organizations can create the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

    • Enable all features final confirmation handshake: only a principal from the management account.

      For more information about invitations, see Inviting an Amazon Web Services account to join your organization in the Organizations User Guide. For more information about requests to enable all features in the organization, see Enabling all features in your organization in the Organizations User Guide.

    After you accept a handshake, it continues to appear in the results of relevant APIs for only 30 days. After that, it's deleted.

    " }, "AttachPolicy":{ "name":"AttachPolicy", @@ -58,7 +58,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type:

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type:

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "CancelHandshake":{ "name":"CancelHandshake", @@ -100,7 +100,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following:

    • Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation.

      While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED.

    • Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

    • You can close only 10% of member accounts, between 10 and 200, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account.

      After you reach this limit, you can close additional accounts in the Billing console. For more information, see Closing an account in the Amazon Web Services Billing and Cost Management User Guide.

    • To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status.

    • If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide.

    For more information about closing accounts, see Closing an Amazon Web Services account in the Organizations User Guide.

    " + "documentation":"

    Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following:

    • Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation.

      While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED.

    • Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

    • You can close only 10% of member accounts, between 10 and 200, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can close additional accounts. For more information, see Closing a member account in your organization in the Organizations User Guide.

    • To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status.

    • If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide.

    " }, "CreateAccount":{ "name":"CreateAccount", @@ -121,7 +121,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

    • Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

    • Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

    The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.

    This operation can be called only from the organization's management account.

    For more information about creating accounts, see Creating an Amazon Web Services account in Your Organization in the Organizations User Guide.

    • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the Organizations User Guide.

    • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

    • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

    • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an Amazon Web Services account in the Organizations User Guide.

    When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

    " + "documentation":"

    Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

    • Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.

    • Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

    The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.

    This operation can be called only from the organization's management account.

    For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide.

    • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method and signing the end user license agreement (EULA) is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

    • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

    • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide.

    When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools.

    " }, "CreateGovCloudAccount":{ "name":"CreateGovCloudAccount", @@ -142,7 +142,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    This action is available if all of the following are true:

    • You're authorized to create accounts in the Amazon Web Services GovCloud (US) Region. For more information on the Amazon Web Services GovCloud (US) Region, see the Amazon Web Services GovCloud User Guide.

    • You already have an account in the Amazon Web Services GovCloud (US) Region that is paired with a management account of an organization in the commercial Region.

    • You call this action from the management account of your organization in the commercial Region.

    • You have the organizations:CreateGovCloudAccount permission.

    Organizations automatically creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and Service-Linked Roles in the Organizations User Guide.

    Amazon Web Services automatically enables CloudTrail for Amazon Web Services GovCloud (US) accounts, but you should also do the following:

    • Verify that CloudTrail is enabled to store logs.

    • Create an Amazon S3 bucket for CloudTrail log storage.

      For more information, see Verifying CloudTrail Is Enabled in the Amazon Web Services GovCloud User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission. The tags are attached to the commercial account associated with the GovCloud account, rather than the GovCloud account itself. To add tags to the GovCloud account, call the TagResource operation in the GovCloud Region after the new GovCloud account exists.

    You call this action from the management account of your organization in the commercial Region to create a standalone Amazon Web Services account in the Amazon Web Services GovCloud (US) Region. After the account is created, the management account of an organization in the Amazon Web Services GovCloud (US) Region can invite it to that organization. For more information on inviting standalone accounts in the Amazon Web Services GovCloud (US) to join an organization, see Organizations in the Amazon Web Services GovCloud User Guide.

    Calling CreateGovCloudAccount is an asynchronous request that Amazon Web Services performs in the background. Because CreateGovCloudAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

    When you call the CreateGovCloudAccount action, you create two accounts: a standalone account in the Amazon Web Services GovCloud (US) Region and an associated account in the commercial Region for billing and support purposes. The account in the commercial Region is automatically a member of the organization whose credentials made the request. Both accounts are associated with the same email address.

    A role is created in the new account in the commercial Region that allows the management account in the organization in the commercial Region to assume it. An Amazon Web Services GovCloud (US) account is then created and associated with the commercial account that you just created. A role is also created in the new Amazon Web Services GovCloud (US) account that can be assumed by the Amazon Web Services GovCloud (US) account that is associated with the management account of the commercial organization. For more information and to view a diagram that explains how account access works, see Organizations in the Amazon Web Services GovCloud User Guide.

    For more information about creating accounts, see Creating an Amazon Web Services account in Your Organization in the Organizations User Guide.

    • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account is not automatically collected. This includes a payment method and signing the end user license agreement (EULA). If you must remove an account from your organization later, you can do so only after you provide the missing information. Follow the steps at To leave an organization as a member account in the Organizations User Guide.

    • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

    • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

    • Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Amazon Web Services Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing an Amazon Web Services account in the Organizations User Guide.

    When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting Access to Your Billing Information and Tools.

    " + "documentation":"

    This action is available if all of the following are true:

    • You're authorized to create accounts in the Amazon Web Services GovCloud (US) Region. For more information on the Amazon Web Services GovCloud (US) Region, see the Amazon Web Services GovCloud User Guide.

    • You already have an account in the Amazon Web Services GovCloud (US) Region that is paired with a management account of an organization in the commercial Region.

    • You call this action from the management account of your organization in the commercial Region.

    • You have the organizations:CreateGovCloudAccount permission.

    Organizations automatically creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

    Amazon Web Services automatically enables CloudTrail for Amazon Web Services GovCloud (US) accounts, but you should also do the following:

    • Verify that CloudTrail is enabled to store logs.

    • Create an Amazon S3 bucket for CloudTrail log storage.

      For more information, see Verifying CloudTrail Is Enabled in the Amazon Web Services GovCloud User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission. The tags are attached to the commercial account associated with the GovCloud account, rather than the GovCloud account itself. To add tags to the GovCloud account, call the TagResource operation in the GovCloud Region after the new GovCloud account exists.

    You call this action from the management account of your organization in the commercial Region to create a standalone Amazon Web Services account in the Amazon Web Services GovCloud (US) Region. After the account is created, the management account of an organization in the Amazon Web Services GovCloud (US) Region can invite it to that organization. For more information on inviting standalone accounts in the Amazon Web Services GovCloud (US) to join an organization, see Organizations in the Amazon Web Services GovCloud User Guide.

    Calling CreateGovCloudAccount is an asynchronous request that Amazon Web Services performs in the background. Because CreateGovCloudAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

    When you call the CreateGovCloudAccount action, you create two accounts: a standalone account in the Amazon Web Services GovCloud (US) Region and an associated account in the commercial Region for billing and support purposes. The account in the commercial Region is automatically a member of the organization whose credentials made the request. Both accounts are associated with the same email address.

    A role is created in the new account in the commercial Region that allows the management account in the organization in the commercial Region to assume it. An Amazon Web Services GovCloud (US) account is then created and associated with the commercial account that you just created. A role is also created in the new Amazon Web Services GovCloud (US) account that can be assumed by the Amazon Web Services GovCloud (US) account that is associated with the management account of the commercial organization. For more information and to view a diagram that explains how account access works, see Organizations in the Amazon Web Services GovCloud User Guide.

    For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide.

    • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account is not automatically collected. This includes a payment method and signing the end user license agreement (EULA). If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.

    • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.

    • Using CreateGovCloudAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Amazon Web Services Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide.

    When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools.

    " }, "CreateOrganization":{ "name":"CreateOrganization", @@ -162,7 +162,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"AccessDeniedForDependencyException"} ], - "documentation":"

    Creates an Amazon Web Services organization. The account whose user is calling the CreateOrganization operation automatically becomes the management account of the new organization.

    This operation must be called using credentials from the account that is to become the new organization's management account. The principal must also have the relevant IAM permissions.

    By default (or if you set the FeatureSet parameter to ALL), the new organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to CONSOLIDATED_BILLING\", no policy types are enabled by default, and you can't use organization policies

    " + "documentation":"

    Creates an Amazon Web Services organization. The account whose user is calling the CreateOrganization operation automatically becomes the management account of the new organization.

    This operation must be called using credentials from the account that is to become the new organization's management account. The principal must also have the relevant IAM permissions.

    By default (or if you set the FeatureSet parameter to ALL), the new organization is created with all features enabled and service control policies automatically enabled in the root. If you instead choose to create the organization supporting only the consolidated billing features by setting the FeatureSet parameter to CONSOLIDATED_BILLING, no policy types are enabled by default and you can't use organization policies.

    " }, "CreateOrganizationalUnit":{ "name":"CreateOrganizationalUnit", @@ -183,7 +183,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Creates an organizational unit (OU) within a root or parent OU. An OU is a container for accounts that enables you to organize your accounts to apply policies according to your business requirements. The number of levels deep that you can nest OUs is dependent upon the policy types enabled for that root. For service control policies, the limit is five.

    For more information about OUs, see Managing Organizational Units in the Organizations User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Creates an organizational unit (OU) within a root or parent OU. An OU is a container for accounts that enables you to organize your accounts to apply policies according to your business requirements. The number of levels deep that you can nest OUs is dependent upon the policy types enabled for that root. For service control policies, the limit is five.

    For more information about OUs, see Managing organizational units (OUs) in the Organizations User Guide.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.

    " }, "CreatePolicy":{ "name":"CreatePolicy", @@ -206,7 +206,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual Amazon Web Services account.

    For more information about policies and their use, see Managing Organization Policies.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual Amazon Web Services account.

    For more information about policies and their use, see Managing Organizations policies.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "DeclineHandshake":{ "name":"DeclineHandshake", @@ -282,7 +282,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", @@ -379,7 +379,7 @@ {"shape":"InvalidInputException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Returns the contents of the effective policy for specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account.

    This operation applies only to policy types other than service control policies (SCPs).

    For more information about policy inheritance, see How Policy Inheritance Works in the Organizations User Guide.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " + "documentation":"

    Returns the contents of the effective policy for specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account.

    This operation applies only to policy types other than service control policies (SCPs).

    For more information about policy inheritance, see Understanding management policy inheritance in the Organizations User Guide.

    This operation can be called from any account in the organization.

    " }, "DescribeHandshake":{ "name":"DescribeHandshake", @@ -468,7 +468,7 @@ {"shape":"ResourcePolicyNotFoundException"}, {"shape":"ConstraintViolationException"} ], - "documentation":"

    Retrieves information about a resource policy.

    You can only call this operation from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " + "documentation":"

    Retrieves information about a resource policy.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "DetachPolicy":{ "name":"DetachPolicy", @@ -491,7 +491,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Detaches a policy from a target root, organizational unit (OU), or account.

    If the policy being detached is a service control policy (SCP), the changes to permissions for Identity and Access Management (IAM) users and roles in affected accounts are immediate.

    Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with an SCP that limits the permissions that can be delegated, you must attach the replacement SCP before you can remove the default SCP. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Detaches a policy from a target root, organizational unit (OU), or account.

    If the policy being detached is a service control policy (SCP), the changes to permissions for Identity and Access Management (IAM) users and roles in affected accounts are immediate.

    Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with an SCP that limits the permissions that can be delegated, you must attach the replacement SCP before you can remove the default SCP. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "DisableAWSServiceAccess":{ "name":"DisableAWSServiceAccess", @@ -510,7 +510,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Disables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from Organizations.

    We strongly recommend that you don't use this command to disable integration between Organizations and the specified Amazon Web Services service. Instead, use the console or commands that are provided by the specified service. This lets the trusted service perform any required initialization when enabling trusted access, such as creating any required resources and any required clean up of resources when disabling trusted access.

    For information about how to disable trusted service access to your organization using the trusted service, see the Learn more link under the Supports Trusted Access column at Amazon Web Services services that you can use with Organizations. on this page.

    If you disable access by using this command, it causes the following actions to occur:

    • The service can no longer create a service-linked role in the accounts in your organization. This means that the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from Organizations.

    • The service can no longer perform tasks in the member accounts in the organization, unless those operations are explicitly permitted by the IAM policies that are attached to your roles. This includes any data aggregation from the member accounts to the management account, or to a delegated administrator account, where relevant.

    • Some services detect this and clean up any remaining data or resources related to the integration, while other services stop accessing the organization but leave any historical data and configuration in place to support a possible re-enabling of the integration.

    Using the other service's console or commands to disable the integration ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

    After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts

    For more information about integrating other services with Organizations, including the list of services that work with Organizations, see Integrating Organizations with Other Amazon Web Services Services in the Organizations User Guide.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Disables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you disable integration, the specified service no longer can create a service-linked role in new accounts in your organization. This means the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from Organizations.

    We strongly recommend that you don't use this command to disable integration between Organizations and the specified Amazon Web Services service. Instead, use the console or commands that are provided by the specified service. This lets the trusted service perform any required initialization when enabling trusted access, such as creating any required resources and any required clean up of resources when disabling trusted access.

    For information about how to disable trusted service access to your organization using the trusted service, see the Learn more link under the Supports Trusted Access column at Amazon Web Services services that you can use with Organizations on this page.

    If you disable access by using this command, it causes the following actions to occur:

    • The service can no longer create a service-linked role in the accounts in your organization. This means that the service can't perform operations on your behalf on any new accounts in your organization. The service can still perform operations in older accounts until the service completes its clean-up from Organizations.

    • The service can no longer perform tasks in the member accounts in the organization, unless those operations are explicitly permitted by the IAM policies that are attached to your roles. This includes any data aggregation from the member accounts to the management account, or to a delegated administrator account, where relevant.

    • Some services detect this and clean up any remaining data or resources related to the integration, while other services stop accessing the organization but leave any historical data and configuration in place to support a possible re-enabling of the integration.

    Using the other service's console or commands to disable the integration ensures that the other service is aware that it can clean up any resources that are required only for the integration. How the service cleans up its resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

    After you perform the DisableAWSServiceAccess operation, the specified service can no longer perform operations in your organization's accounts.

    For more information about integrating other services with Organizations, including the list of services that work with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

    This operation can be called only from the organization's management account.

    " }, "DisablePolicyType":{ "name":"DisablePolicyType", @@ -533,7 +533,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Disables an organizational policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

    This is an asynchronous request that Amazon Web Services performs in the background. If you disable a policy type for a root, it still appears enabled for the organization if all features are enabled for the organization. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

    This operation can be called only from the organization's management account.

    To view the status of available policy types in the organization, use DescribeOrganization.

    " + "documentation":"

    Disables an organizational policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

    This is an asynchronous request that Amazon Web Services performs in the background. If you disable a policy type for a root, it still appears enabled for the organization if all features are enabled for the organization. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    To view the status of available policy types in the organization, use DescribeOrganization.

    " }, "EnableAWSServiceAccess":{ "name":"EnableAWSServiceAccess", @@ -552,7 +552,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.

    We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

    For more information about enabling services to integrate with Organizations, see Integrating Organizations with Other Amazon Web Services Services in the Organizations User Guide.

    You can only call this operation from the organization's management account and only if the organization has enabled all features.

    " + "documentation":"

    Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.

    We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

    For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

    You can only call this operation from the organization's management account and only if the organization has enabled all features.

    " }, "EnableAllFeatures":{ "name":"EnableAllFeatures", @@ -571,7 +571,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that Organizations supports. For more information, see Enabling All Features in Your Organization in the Organizations User Guide.

    This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

    After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

    After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

    After you enable all features in your organization, the management account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The management account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Enables all features in an organization. This enables the use of organization policies that can restrict the services and actions that can be called in each account. Until you enable all features, you have access only to consolidated billing, and you can't use any of the advanced account administration features that Organizations supports. For more information, see Enabling all features in your organization in the Organizations User Guide.

    This operation is required only for organizations that were created explicitly with only the consolidated billing features enabled. Calling this operation sends a handshake to every invited account in the organization. The feature set change can be finalized and the additional features enabled only after all administrators in the invited accounts approve the change by accepting the handshake.

    After you enable all features, you can separately enable or disable individual policy types in a root using EnablePolicyType and DisablePolicyType. To see the status of policy types in a root, use ListRoots.

    After all invited member accounts accept the handshake, you finalize the feature set change by accepting the handshake that contains \"Action\": \"ENABLE_ALL_FEATURES\". This completes the change.

    After you enable all features in your organization, the management account in the organization can apply policies on all member accounts. These policies can restrict what users and even administrators in those accounts can do. The management account can apply policies that prevent accounts from leaving the organization. Ensure that your account administrators are aware of this.

    This operation can be called only from the organization's management account.

    " }, "EnablePolicyType":{ "name":"EnablePolicyType", @@ -595,7 +595,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

    This is an asynchronous request that Amazon Web Services performs in the background. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

    This operation can be called only from the organization's management account.

    You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.

    " + "documentation":"

    Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

    This is an asynchronous request that Amazon Web Services performs in the background. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.

    " }, "InviteAccountToOrganization":{ "name":"InviteAccountToOrganization", @@ -618,7 +618,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Sends an invitation to another account to join your organization as a member account. Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

    • You can invite Amazon Web Services accounts only from the same seller as the management account. For example, if your organization's management account was created by Amazon Internet Services Pvt. Ltd (AISPL), an Amazon Web Services seller in India, you can invite only other AISPL accounts to your organization. You can't combine accounts from AISPL and Amazon Web Services or from any other Amazon Web Services seller. For more information, see Consolidated Billing in India.

    • If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, contact Amazon Web Services Support.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Sends an invitation to another account to join your organization as a member account. Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

    • You can invite Amazon Web Services accounts only from the same seller as the management account. For example, if your organization's management account was created by Amazon Internet Services Pvt. Ltd (AISPL), an Amazon Web Services seller in India, you can invite only other AISPL accounts to your organization. You can't combine accounts from AISPL and Amazon Web Services or from any other Amazon Web Services seller. For more information, see Consolidated billing in India.

    • If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, contact Amazon Web Services Support.

    If the request includes tags, then the requester must have the organizations:TagResource permission.

    This operation can be called only from the organization's management account.

    " }, "LeaveOrganization":{ "name":"LeaveOrganization", @@ -637,7 +637,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

    This operation can be called only from a member account in the organization.

    • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

    • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

      • Choose a support plan

      • Provide and verify the required contact information

      • Provide a current payment method

      Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. Follow the steps at To leave an organization when all required account information has not yet been provided in the Organizations User Guide.

    • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

    • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see Activating Access to the Billing and Cost Management Console in the Amazon Web Services Billing and Cost Management User Guide.

    • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

    • A newly created account has a waiting period before it can be removed from its organization. If you get an error that indicates that a wait period is required, then try again in a few days.

    " + "documentation":"

    Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

    This operation can be called only from a member account in the organization.

    • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

    • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

      • Choose a support plan

      • Provide and verify the required contact information

      • Provide a current payment method

      Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

    • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide.

    • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

    • A newly created account has a waiting period before it can be removed from its organization. If you get an error that indicates that a wait period is required, then try again in a few days.

    • If you are using an organization principal to call LeaveOrganization across multiple accounts, you can only do this up to 5 accounts per second in a single organization.

    " }, "ListAWSServiceAccessForOrganization":{ "name":"ListAWSServiceAccessForOrganization", @@ -656,7 +656,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

    Returns a list of the Amazon Web Services services that you enabled to integrate with your organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts.

    For more information about integrating other services with Organizations, including the list of services that currently work with Organizations, see Integrating Organizations with Other Amazon Web Services Services in the Organizations User Guide.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " + "documentation":"

    Returns a list of the Amazon Web Services services that you enabled to integrate with your organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts.

    For more information about integrating other services with Organizations, including the list of services that currently work with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "ListAccounts":{ "name":"ListAccounts", @@ -1011,7 +1011,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Removes the specified account from the organization.

    The removed account becomes a standalone account that isn't a member of any organization. It's no longer subject to any policies and is responsible for its own bill payments. The organization's management account is no longer charged for any expenses accrued by the member account after it's removed from the organization.

    This operation can be called only from the organization's management account. Member accounts can remove themselves with LeaveOrganization instead.

    • You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For an account that you want to make standalone, you must choose a support plan, provide and verify the required contact information, and provide a current payment method. Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. To remove an account that doesn't yet have this information, you must sign in as the member account and follow the steps at To leave an organization when all required account information has not yet been provided in the Organizations User Guide.

    • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

    • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

    " + "documentation":"

    Removes the specified account from the organization.

    The removed account becomes a standalone account that isn't a member of any organization. It's no longer subject to any policies and is responsible for its own bill payments. The organization's management account is no longer charged for any expenses accrued by the member account after it's removed from the organization.

    This operation can be called only from the organization's management account. Member accounts can remove themselves with LeaveOrganization instead.

    • You can remove an account from your organization only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

    • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

    " }, "TagResource":{ "name":"TagResource", @@ -1030,7 +1030,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Adds one or more tags to the specified resource.

    Currently, you can attach tags to the following resources in Organizations.

    • Amazon Web Services account

    • Organization root

    • Organizational unit (OU)

    • Policy (any type)

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Adds one or more tags to the specified resource.

    Currently, you can attach tags to the following resources in Organizations.

    • Amazon Web Services account

    • Organization root

    • Organizational unit (OU)

    • Policy (any type)

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "UntagResource":{ "name":"UntagResource", @@ -1049,7 +1049,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Removes any tags with the specified keys from the specified resource.

    You can attach tags to the following resources in Organizations.

    • Amazon Web Services account

    • Organization root

    • Organizational unit (OU)

    • Policy (any type)

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Removes any tags with the specified keys from the specified resource.

    You can attach tags to the following resources in Organizations.

    • Amazon Web Services account

    • Organization root

    • Organizational unit (OU)

    • Policy (any type)

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " }, "UpdateOrganizationalUnit":{ "name":"UpdateOrganizationalUnit", @@ -1093,7 +1093,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

    Updates an existing policy with a new name, description, or content. If you don't supply any parameter, that value remains unchanged. You can't change a policy's type.

    This operation can be called only from the organization's management account.

    " + "documentation":"

    Updates an existing policy with a new name, description, or content. If you don't supply any parameter, that value remains unchanged. You can't change a policy's type.

    This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

    " } }, "shapes":{ @@ -1129,7 +1129,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

    ", + "documentation":"

    You don't have permissions to perform the requested operation. The user or role that is making the request must have at least one IAM permissions policy attached that grants the required permissions. For more information, see Access Management in the IAM User Guide.

    ", "exception":true }, "AccessDeniedForDependencyException":{ @@ -1239,7 +1239,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    You can't invite an existing account to your organization until you verify that you own the email address associated with the management account. For more information, see Email Address Verification in the Organizations User Guide.

    ", + "documentation":"

    You can't invite an existing account to your organization until you verify that you own the email address associated with the management account. For more information, see Email address verification in the Organizations User Guide.

    ", "exception":true }, "AccountStatus":{ @@ -1378,7 +1378,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

    Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

    Some of the reasons in the following list might not be applicable to this specific API or operation.

    • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.

    • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.

    • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

    • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.

    • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit.

      Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts.

      Deleted and closed accounts still count toward your limit.

      If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.

    • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.

    • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.​

    • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

    • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.

    • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time. ​

    • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

    • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

    • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verfication code.

    • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

    • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.

    • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

    • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services /> Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.

    • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.

    • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.

    • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the Organizations User Guide.

    • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

    • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

    • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

    • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. Follow the steps at To leave an organization when all required account information has not yet been provided in the Organizations User Guide.

    • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

    • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

    • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

    • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

    • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

    • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

    • SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.

    • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

    • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, there is a waiting period before you can remove it from the organization. If you get an error that indicates that a wait period is required, try again in a few days.

    ", + "documentation":"

    Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

    Some of the reasons in the following list might not be applicable to this specific API or operation.

    • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.

    • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.

    • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

    • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.

    • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit.

      Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts.

      Deleted and closed accounts still count toward your limit.

      If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.

    • CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator.

    • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.

    • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.

    • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

    • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.

    • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time.

    • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

    • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

    • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.

    • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

    • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.

    • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be associated with the same marketplace.

    • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.

    • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.

    • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.

    • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

    • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

    • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

    • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

    • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

    • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

    • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

    • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

    • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

    • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

    • SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.

    • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

    • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, there is a waiting period before you can remove it from the organization. If you get an error that indicates that a wait period is required, try again in a few days.

    ", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1464,11 +1464,11 @@ }, "RoleName":{ "shape":"RoleName", - "documentation":"

    The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

    If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

    For more information about how to use this role to access the member account, see the following links:

    The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

    " + "documentation":"

    The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

    If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

    For more information about how to use this role to access the member account, see the following links:

    The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

    " }, "IamUserAccessToBilling":{ "shape":"IAMUserAccessToBilling", - "documentation":"

    If set to ALLOW, the new account enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see Activating Access to the Billing and Cost Management Console in the Amazon Web Services Billing and Cost Management User Guide.

    If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

    " + "documentation":"

    If set to ALLOW, the new account enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide.

    If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

    " }, "Tags":{ "shape":"Tags", @@ -1486,7 +1486,7 @@ "members":{ "CreateAccountStatus":{ "shape":"CreateAccountStatus", - "documentation":"

    A structure that contains details about the request to create an account. This response structure might not be fully populated when you first receive it because account creation is an asynchronous process. You can pass the returned CreateAccountStatus ID as a parameter to DescribeCreateAccountStatus to get status about the progress of the request at later times. You can also check the CloudTrail log for the CreateAccountResult event. For more information, see Monitoring the Activity in Your Organization in the Organizations User Guide.

    " + "documentation":"

    A structure that contains details about the request to create an account. This response structure might not be fully populated when you first receive it because account creation is an asynchronous process. You can pass the returned CreateAccountStatus ID as a parameter to DescribeCreateAccountStatus to get status about the progress of the request at later times. You can also check the CloudTrail log for the CreateAccountResult event. For more information, see Logging and monitoring in Organizations in the Organizations User Guide.

    " } } }, @@ -1569,11 +1569,11 @@ }, "RoleName":{ "shape":"RoleName", - "documentation":"

    (Optional)

    The name of an IAM role that Organizations automatically preconfigures in the new member accounts in both the Amazon Web Services GovCloud (US) Region and in the commercial Region. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

    If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

    For more information about how to use this role to access the member account, see Accessing and Administering the Member Accounts in Your Organization in the Organizations User Guide and steps 2 and 3 in Tutorial: Delegate Access Across Amazon Web Services accounts Using IAM Roles in the IAM User Guide.

    The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

    " + "documentation":"

    (Optional)

    The name of an IAM role that Organizations automatically preconfigures in the new member accounts in both the Amazon Web Services GovCloud (US) Region and in the commercial Region. This role trusts the management account, allowing users in the management account to assume the role, as permitted by the management account administrator. The role has administrator permissions in the new member account.

    If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole.

    For more information about how to use this role to access the member account, see the following links:

    The regex pattern that is used to validate this parameter. The pattern can include uppercase letters, lowercase letters, digits with no spaces, and any of the following characters: =,.@-

    " }, "IamUserAccessToBilling":{ "shape":"IAMUserAccessToBilling", - "documentation":"

    If set to ALLOW, the new linked account in the commercial Region enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see Activating Access to the Billing and Cost Management Console in the Amazon Web Services Billing and Cost Management User Guide.

    If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

    " + "documentation":"

    If set to ALLOW, the new linked account in the commercial Region enables IAM users to access account billing information if they have the required permissions. If set to DENY, only the root user of the new account can access account billing information. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide.

    If you don't specify this parameter, the value defaults to ALLOW, and IAM users and roles with the required permissions can access billing information for the new account.

    " }, "Tags":{ "shape":"Tags", @@ -1592,7 +1592,7 @@ "members":{ "FeatureSet":{ "shape":"OrganizationFeatureSet", - "documentation":"

    Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality.

    • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the management account. For more information, see Consolidated billing in the Organizations User Guide.

      The consolidated billing feature subset isn't available for organizations in the Amazon Web Services GovCloud (US) Region.

    • ALL: In addition to all the features supported by the consolidated billing feature set, the management account can also apply any policy type to any member account in the organization. For more information, see All features in the Organizations User Guide.

    " + "documentation":"

    Specifies the feature set supported by the new organization. Each feature set supports different levels of functionality.

    • CONSOLIDATED_BILLING: All member accounts have their bills consolidated to and paid by the management account. For more information, see Consolidated billing in the Organizations User Guide.

      The consolidated billing feature subset isn't available for organizations in the Amazon Web Services GovCloud (US) Region.

    • ALL: In addition to all the features supported by the consolidated billing feature set, the management account can also apply any policy type to any member account in the organization. For more information, see All features in the Organizations User Guide.

    " } } }, @@ -2211,7 +2211,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"HandshakeConstraintViolationExceptionReason"} }, - "documentation":"

    The requested operation would violate the constraint identified in the reason code.

    Some of the reasons in the following list might not be applicable to this specific API or operation:

    • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note that deleted and closed accounts still count toward your limit.

      If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact Amazon Web Services Support.

    • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

    • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

    • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations to join an organization while it's in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

    • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

    • ORGANIZATION_IS_ALREADY_PENDING_ALL_FEATURES_MIGRATION: The handshake request is invalid because the organization has already started the process to enable all features.

    • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

    • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

    • PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an account that doesn't have a payment instrument, such as a credit card, associated with it.

    ", + "documentation":"

    The requested operation would violate the constraint identified in the reason code.

    Some of the reasons in the following list might not be applicable to this specific API or operation:

    • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note that deleted and closed accounts still count toward your limit.

      If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact Amazon Web Services Support.

    • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

    • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

    • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations to join an organization while it's in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

    • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

    • ORGANIZATION_IS_ALREADY_PENDING_ALL_FEATURES_MIGRATION: The handshake request is invalid because the organization has already started the process to enable all features.

    • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

    • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

    • PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an account that doesn't have a payment instrument, such as a credit card, associated with it.

    ", "exception":true }, "HandshakeConstraintViolationExceptionReason":{ @@ -2929,7 +2929,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The provided policy document doesn't meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see Service Control Policy Syntax in the Organizations User Guide.

    ", + "documentation":"

    The provided policy document doesn't meet the requirements of the specified policy type. For example, the syntax might be incorrect. For details about service control policy syntax, see SCP syntax in the Organizations User Guide.

    ", "exception":true }, "MasterCannotLeaveOrganizationException":{ @@ -2986,7 +2986,7 @@ }, "FeatureSet":{ "shape":"OrganizationFeatureSet", - "documentation":"

    Specifies the functionality that currently is available to the organization. If set to \"ALL\", then all features are enabled and policies can be applied to accounts in the organization. If set to \"CONSOLIDATED_BILLING\", then only consolidated billing functionality is available. For more information, see Enabling All Features in Your Organization in the Organizations User Guide.

    " + "documentation":"

    Specifies the functionality that currently is available to the organization. If set to \"ALL\", then all features are enabled and policies can be applied to accounts in the organization. If set to \"CONSOLIDATED_BILLING\", then only consolidated billing functionality is available. For more information, see Enabling all features in your organization in the Organizations User Guide.

    " }, "MasterAccountArn":{ "shape":"AccountArn", @@ -3027,7 +3027,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The organization isn't empty. To delete an organization, you must first remove all accounts except the management account, delete all OUs, and delete all policies.

    ", + "documentation":"

    The organization isn't empty. To delete an organization, you must first remove all accounts except the management account.

    ", "exception":true }, "OrganizationalUnit":{ @@ -3280,7 +3280,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable SCPs only after you enable all features in the organization. For more information, see Managing Organizations Policiesin the Organizations User Guide.

    ", + "documentation":"

    You can't use the specified policy type with the feature set currently enabled for this organization. For example, you can enable SCPs only after you enable all features in the organization. For more information, see Managing Organizations policies in the Organizations User Guide.

    ", "exception":true }, "PolicyTypeNotEnabledException":{ @@ -3288,7 +3288,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The specified policy type isn't currently enabled in this root. You can't attach policies of the specified type to entities in a root until you enable that type in the root. For more information, see Enabling All Features in Your Organization in the Organizations User Guide.

    ", + "documentation":"

    The specified policy type isn't currently enabled in this root. You can't attach policies of the specified type to entities in a root until you enable that type in the root. For more information, see Enabling all features in your organization in the Organizations User Guide.

    ", "exception":true }, "PolicyTypeStatus":{ @@ -3323,7 +3323,7 @@ "members":{ "Content":{ "shape":"ResourcePolicyContent", - "documentation":"

    If provided, the new content for the resource policy. The text must be correctly formatted JSON that complies with the syntax for the resource policy's type. For more information, see Service Control Policy Syntax in the Organizations User Guide.

    " + "documentation":"

    If provided, the new content for the resource policy. The text must be correctly formatted JSON that complies with the syntax for the resource policy's type. For more information, see SCP syntax in the Organizations User Guide.

    " }, "Tags":{ "shape":"Tags", @@ -3581,7 +3581,7 @@ "Type":{"shape":"ExceptionType"}, "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    You have sent too many requests in too short a period of time. The quota helps protect against denial-of-service attacks. Try again later.

    For information about quotas that affect Organizations, see Quotas for Organizationsin the Organizations User Guide.

    ", + "documentation":"

    You have sent too many requests in too short a period of time. The quota helps protect against denial-of-service attacks. Try again later.

    For information about quotas that affect Organizations, see Quotas for Organizations in the Organizations User Guide.

    ", "exception":true }, "UnsupportedAPIEndpointException":{ @@ -3650,7 +3650,7 @@ }, "Content":{ "shape":"PolicyContent", - "documentation":"

    If provided, the new content for the policy. The text must be correctly formatted JSON that complies with the syntax for the policy's type. For more information, see Service Control Policy Syntax in the Organizations User Guide.

    " + "documentation":"

    If provided, the new content for the policy. The text must be correctly formatted JSON that complies with the syntax for the policy's type. For more information, see SCP syntax in the Organizations User Guide.

    " } } }, @@ -3664,5 +3664,5 @@ } } }, - "documentation":"

    Organizations is a web service that enables you to consolidate your multiple Amazon Web Services accounts into an organization and centrally manage your accounts and their resources.

    This guide provides descriptions of the Organizations operations. For more information about using this service, see the Organizations User Guide.

    Support and feedback for Organizations

    We welcome your feedback. Send your comments to feedback-awsorganizations@amazon.com or post your feedback and questions in the Organizations support forum. For more information about the Amazon Web Services support forums, see Forums Help.

    Endpoint to call When using the CLI or the Amazon Web Services SDK

    For the current release of Organizations, specify the us-east-1 region for all Amazon Web Services API and CLI calls made from the commercial Amazon Web Services Regions outside of China. If calling from one of the Amazon Web Services Regions in China, then specify cn-northwest-1. You can do this in the CLI by using these parameters and commands:

    • Use the following parameter with each command to specify both the endpoint and its region:

      --endpoint-url https://organizations.us-east-1.amazonaws.com (from commercial Amazon Web Services Regions outside of China)

      or

      --endpoint-url https://organizations.cn-northwest-1.amazonaws.com.cn (from Amazon Web Services Regions in China)

    • Use the default endpoint, but configure your default region with this command:

      aws configure set default.region us-east-1 (from commercial Amazon Web Services Regions outside of China)

      or

      aws configure set default.region cn-northwest-1 (from Amazon Web Services Regions in China)

    • Use the following parameter with each command to specify the endpoint:

      --region us-east-1 (from commercial Amazon Web Services Regions outside of China)

      or

      --region cn-northwest-1 (from Amazon Web Services Regions in China)

    Recording API Requests

    Organizations supports CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Organizations service received, who made the request and when, and so on. For more about Organizations and its support for CloudTrail, see Logging Organizations Events with CloudTrail in the Organizations User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

    " + "documentation":"

    Organizations is a web service that enables you to consolidate your multiple Amazon Web Services accounts into an organization and centrally manage your accounts and their resources.

    This guide provides descriptions of the Organizations operations. For more information about using this service, see the Organizations User Guide.

    Support and feedback for Organizations

    We welcome your feedback. Send your comments to feedback-awsorganizations@amazon.com or post your feedback and questions in the Organizations support forum. For more information about the Amazon Web Services support forums, see Forums Help.

    Endpoint to call When using the CLI or the Amazon Web Services SDK

    For the current release of Organizations, specify the us-east-1 region for all Amazon Web Services API and CLI calls made from the commercial Amazon Web Services Regions outside of China. If calling from one of the Amazon Web Services Regions in China, then specify cn-northwest-1. You can do this in the CLI by using these parameters and commands:

    • Use the following parameter with each command to specify both the endpoint and its region:

      --endpoint-url https://organizations.us-east-1.amazonaws.com (from commercial Amazon Web Services Regions outside of China)

      or

      --endpoint-url https://organizations.cn-northwest-1.amazonaws.com.cn (from Amazon Web Services Regions in China)

    • Use the default endpoint, but configure your default region with this command:

      aws configure set default.region us-east-1 (from commercial Amazon Web Services Regions outside of China)

      or

      aws configure set default.region cn-northwest-1 (from Amazon Web Services Regions in China)

    • Use the following parameter with each command to specify the endpoint:

      --region us-east-1 (from commercial Amazon Web Services Regions outside of China)

      or

      --region cn-northwest-1 (from Amazon Web Services Regions in China)

    Recording API Requests

    Organizations supports CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Organizations service received, who made the request and when, and so on. For more about Organizations and its support for CloudTrail, see Logging Organizations API calls with CloudTrail in the Organizations User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

    " } diff --git a/services/osis/pom.xml b/services/osis/pom.xml index 3af8f6c3c96..13d61405985 100644 --- a/services/osis/pom.xml +++ b/services/osis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT osis AWS Java SDK :: Services :: OSIS diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index f563efb19f7..ca5c8436097 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/panorama/pom.xml b/services/panorama/pom.xml index 2304aca3425..2fea541960b 100644 --- a/services/panorama/pom.xml +++ b/services/panorama/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT panorama AWS Java SDK :: Services :: Panorama diff --git a/services/paymentcryptography/pom.xml b/services/paymentcryptography/pom.xml index 878f5ee6eba..f3e82e22794 100644 --- a/services/paymentcryptography/pom.xml +++ b/services/paymentcryptography/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT paymentcryptography AWS Java SDK :: Services :: Payment Cryptography diff --git a/services/paymentcryptographydata/pom.xml b/services/paymentcryptographydata/pom.xml index 823ab5684fd..fd173af61bd 100644 --- a/services/paymentcryptographydata/pom.xml +++ b/services/paymentcryptographydata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT paymentcryptographydata AWS Java SDK :: Services :: Payment Cryptography Data diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 230c5c40081..01559439987 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff 
--git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index a3ce68c90e8..77eaab0ad6d 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index 3377e1fc068..5f02ed9ff37 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/pi/pom.xml b/services/pi/pom.xml index b2d591c3f86..876aa892a54 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pi/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/pi/src/main/resources/codegen-resources/endpoint-rule-set.json index ca59653fa9b..2a013d4fe26 100644 --- a/services/pi/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/pi/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, 
+ { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://pi-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": 
"https://pi-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://pi-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://pi-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": 
"tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://pi.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://pi.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://pi.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://pi.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/pi/src/main/resources/codegen-resources/endpoint-tests.json b/services/pi/src/main/resources/codegen-resources/endpoint-tests.json index a720fcbb69a..73734bfa14e 100644 --- a/services/pi/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/pi/src/main/resources/codegen-resources/endpoint-tests.json @@ -429,6 +429,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 
with FIPS enabled and DualStack disabled", "expect": { @@ -442,6 +453,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -455,6 +477,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { @@ -468,6 +501,17 @@ "UseDualStack": false } }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, { "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { @@ -531,6 +575,12 @@ "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/pi/src/main/resources/codegen-resources/paginators-1.json b/services/pi/src/main/resources/codegen-resources/paginators-1.json index 8392da5c25b..be1a4c984e9 100644 --- a/services/pi/src/main/resources/codegen-resources/paginators-1.json +++ b/services/pi/src/main/resources/codegen-resources/paginators-1.json @@ -19,6 +19,11 @@ "input_token": "NextToken", "output_token": 
"NextToken", "limit_key": "MaxResults" + }, + "ListPerformanceAnalysisReports": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/services/pi/src/main/resources/codegen-resources/service-2.json b/services/pi/src/main/resources/codegen-resources/service-2.json index 4a2840b5f60..d80c7e26312 100644 --- a/services/pi/src/main/resources/codegen-resources/service-2.json +++ b/services/pi/src/main/resources/codegen-resources/service-2.json @@ -14,6 +14,36 @@ "uid":"pi-2018-02-27" }, "operations":{ + "CreatePerformanceAnalysisReport":{ + "name":"CreatePerformanceAnalysisReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePerformanceAnalysisReportRequest"}, + "output":{"shape":"CreatePerformanceAnalysisReportResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Creates a new performance analysis report for a specific time period for the DB instance.

    " + }, + "DeletePerformanceAnalysisReport":{ + "name":"DeletePerformanceAnalysisReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePerformanceAnalysisReportRequest"}, + "output":{"shape":"DeletePerformanceAnalysisReportResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Deletes a performance analysis report.

    " + }, "DescribeDimensionKeys":{ "name":"DescribeDimensionKeys", "http":{ @@ -44,6 +74,21 @@ ], "documentation":"

    Get the attributes of the specified dimension group for a DB instance or data source. For example, if you specify a SQL ID, GetDimensionKeyDetails retrieves the full text of the dimension db.sql.statement associated with this ID. This operation is useful because GetResourceMetrics and DescribeDimensionKeys don't support retrieval of large SQL statement text.

    " }, + "GetPerformanceAnalysisReport":{ + "name":"GetPerformanceAnalysisReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPerformanceAnalysisReportRequest"}, + "output":{"shape":"GetPerformanceAnalysisReportResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Retrieves the report including the report ID, status, time details, and the insights with recommendations. The report status can be RUNNING, SUCCEEDED, or FAILED. The insights include the description and recommendation fields.

    " + }, "GetResourceMetadata":{ "name":"GetResourceMetadata", "http":{ @@ -103,9 +148,73 @@ {"shape":"NotAuthorizedException"} ], "documentation":"

    Retrieve metrics of the specified types that can be queried for a specified DB instance.

    " + }, + "ListPerformanceAnalysisReports":{ + "name":"ListPerformanceAnalysisReports", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListPerformanceAnalysisReportsRequest"}, + "output":{"shape":"ListPerformanceAnalysisReportsResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Lists all the analysis reports created for the DB instance. The reports are sorted based on the start time of each report.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Retrieves all the metadata tags associated with Amazon RDS Performance Insights resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Adds metadata tags to the Amazon RDS Performance Insights resource.

    " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"InternalServiceError"}, + {"shape":"NotAuthorizedException"} + ], + "documentation":"

    Deletes the metadata tags from the Amazon RDS Performance Insights resource.

    " } }, "shapes":{ + "AcceptLanguage":{ + "type":"string", + "enum":["EN_US"] + }, "AdditionalMetricsList":{ "type":"list", "member":{"shape":"RequestString"}, @@ -117,6 +226,161 @@ "key":{"shape":"RequestString"}, "value":{"shape":"Double"} }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"^arn:.*:pi:.*$" + }, + "AnalysisReport":{ + "type":"structure", + "required":["AnalysisReportId"], + "members":{ + "AnalysisReportId":{ + "shape":"AnalysisReportId", + "documentation":"

    The name of the analysis report.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    The unique identifier of the analysis report.

    " + }, + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    List the tags for the Amazon Web Services service for which Performance Insights returns metrics. Valid values are as follows:

    • RDS

    • DOCDB

    " + }, + "CreateTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The time you created the analysis report.

    " + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The analysis start time in the report.

    " + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The analysis end time in the report.

    " + }, + "Status":{ + "shape":"AnalysisStatus", + "documentation":"

    The status of the created analysis report.

    " + }, + "Insights":{ + "shape":"InsightList", + "documentation":"

    The list of identified insights in the analysis report.

    " + } + }, + "documentation":"

    Retrieves the summary of the performance analysis report created for a time period.

    " + }, + "AnalysisReportId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"report-[0-9a-f]{17}" + }, + "AnalysisReportSummary":{ + "type":"structure", + "members":{ + "AnalysisReportId":{ + "shape":"String", + "documentation":"

    The name of the analysis report.

    " + }, + "CreateTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The time you created the analysis report.

    " + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The start time of the analysis in the report.

    " + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The end time of the analysis in the report.

    " + }, + "Status":{ + "shape":"AnalysisStatus", + "documentation":"

    The status of the analysis report.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    List of all the tags added to the analysis report.

    " + } + }, + "documentation":"

    Retrieves the details of the performance analysis report.

    " + }, + "AnalysisReportSummaryList":{ + "type":"list", + "member":{"shape":"AnalysisReportSummary"} + }, + "AnalysisStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "SUCCEEDED", + "FAILED" + ] + }, + "Boolean":{"type":"boolean"}, + "ContextType":{ + "type":"string", + "enum":[ + "CAUSAL", + "CONTEXTUAL" + ] + }, + "CreatePerformanceAnalysisReportRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier", + "StartTime", + "EndTime" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights will return metrics. Valid value is RDS.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    An immutable, Amazon Web Services Region-unique identifier for a data source. Performance Insights gathers metrics from this data source.

    To use an Amazon RDS instance as a data source, you specify its DbiResourceId value. For example, specify db-ADECBTYHKTSAUMUZQYPDS2GW4A.

    " + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The start time defined for the analysis report.

    " + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The end time defined for the analysis report.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The metadata assigned to the analysis report consisting of a key-value pair.

    " + } + } + }, + "CreatePerformanceAnalysisReportResponse":{ + "type":"structure", + "members":{ + "AnalysisReportId":{ + "shape":"AnalysisReportId", + "documentation":"

    A unique identifier for the created analysis report.

    " + } + } + }, + "Data":{ + "type":"structure", + "members":{ + "PerformanceInsightsMetric":{ + "shape":"PerformanceInsightsMetric", + "documentation":"

    This field determines the Performance Insights metric to render for the insight. The name field refers to a Performance Insights metric.

    " + } + }, + "documentation":"

    List of data objects which provide details about source metrics. This field can be used to determine the PI metric to render for the insight. This data type also includes static values for the metrics for the Insight that were calculated and included in text and annotations on the DB load chart.

    " + }, + "DataList":{ + "type":"list", + "member":{"shape":"Data"} + }, "DataPoint":{ "type":"structure", "required":[ @@ -139,6 +403,33 @@ "type":"list", "member":{"shape":"DataPoint"} }, + "DeletePerformanceAnalysisReportRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier", + "AnalysisReportId" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights will return metrics. Valid value is RDS.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. In the console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the identifier is returned as DbiResourceId.

    To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " + }, + "AnalysisReportId":{ + "shape":"AnalysisReportId", + "documentation":"

    The unique identifier of the analysis report for deletion.

    " + } + } + }, + "DeletePerformanceAnalysisReportResponse":{ + "type":"structure", + "members":{ + } + }, "DescribeDimensionKeysRequest":{ "type":"structure", "required":[ @@ -155,7 +446,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights will return metrics. Valid values are as follows:

    • RDS

    • DOCDB

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable, Amazon Web Services Region-unique identifier for a data source. Performance Insights gathers metrics from this data source.

    To use an Amazon RDS instance as a data source, you specify its DbiResourceId value. For example, specify db-FAIHNTYBKTGAUSUZQYPDS2GW4A.

    " }, "StartTime":{ @@ -230,6 +521,17 @@ "max":2048, "min":1 }, + "DescriptiveMap":{ + "type":"map", + "key":{"shape":"DescriptiveString"}, + "value":{"shape":"DescriptiveString"} + }, + "DescriptiveString":{ + "type":"string", + "max":2000, + "min":1, + "pattern":"^.*$" + }, "DetailStatus":{ "type":"string", "enum":[ @@ -416,6 +718,45 @@ } } }, + "GetPerformanceAnalysisReportRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier", + "AnalysisReportId" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights will return metrics. Valid value is RDS.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. In the console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the identifier is returned as DbiResourceId.

    To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " + }, + "AnalysisReportId":{ + "shape":"AnalysisReportId", + "documentation":"

    A unique identifier of the created analysis report. For example, report-12345678901234567

    " + }, + "TextFormat":{ + "shape":"TextFormat", + "documentation":"

    Indicates the text format in the report. The options are PLAIN_TEXT or MARKDOWN. The default value is plain text.

    " + }, + "AcceptLanguage":{ + "shape":"AcceptLanguage", + "documentation":"

    The text language in the report. The default language is EN_US (English).

    " + } + } + }, + "GetPerformanceAnalysisReportResponse":{ + "type":"structure", + "members":{ + "AnalysisReport":{ + "shape":"AnalysisReport", + "documentation":"

    The summary of the performance analysis report created for a time period.

    " + } + } + }, "GetResourceMetadataRequest":{ "type":"structure", "required":[ @@ -428,7 +769,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics.

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " } } @@ -461,7 +802,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics. Valid values are as follows:

    • RDS

    • DOCDB

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. In the console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the identifier is returned as DbiResourceId.

    To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " }, "MetricQueries":{ @@ -524,7 +865,62 @@ "type":"string", "max":256, "min":0, - "pattern":"^db-[a-zA-Z0-9-]*$" + "pattern":"^[a-zA-Z0-9-]+$" + }, + "Insight":{ + "type":"structure", + "required":["InsightId"], + "members":{ + "InsightId":{ + "shape":"String", + "documentation":"

    The unique identifier for the insight. For example, insight-12345678901234567.

    " + }, + "InsightType":{ + "shape":"String", + "documentation":"

    The type of insight. For example, HighDBLoad, HighCPU, or DominatingSQLs.

    " + }, + "Context":{ + "shape":"ContextType", + "documentation":"

    Indicates if the insight is causal or correlated insight.

    " + }, + "StartTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The start time of the insight. For example, 2018-10-30T00:00:00Z.

    " + }, + "EndTime":{ + "shape":"ISOTimestamp", + "documentation":"

    The end time of the insight. For example, 2018-10-30T00:00:00Z.

    " + }, + "Severity":{ + "shape":"Severity", + "documentation":"

    The severity of the insight. The values are: Low, Medium, or High.

    " + }, + "SupportingInsights":{ + "shape":"InsightList", + "documentation":"

    List of supporting insights that provide additional factors for the insight.

    " + }, + "Description":{ + "shape":"MarkdownString", + "documentation":"

    Description of the insight. For example: A high severity Insight found between 02:00 to 02:30, where there was an unusually high DB load 600x above baseline. Likely performance impact.

    " + }, + "Recommendations":{ + "shape":"RecommendationList", + "documentation":"

    List of recommendations for the insight. For example, Investigate the following SQLs that contributed to 100% of the total DBLoad during that time period: sql-id.

    " + }, + "InsightData":{ + "shape":"DataList", + "documentation":"

    List of data objects containing metrics and references from the time range while generating the insight.

    " + }, + "BaselineData":{ + "shape":"DataList", + "documentation":"

    Metric names and values from the timeframe used as baseline to generate the insight.

    " + } + }, + "documentation":"

    Retrieves the list of performance issues which are identified.

    " + }, + "InsightList":{ + "type":"list", + "member":{"shape":"Insight"} }, "Integer":{"type":"integer"}, "InternalServiceError":{ @@ -562,7 +958,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics.

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable identifier for a data source that is unique within an Amazon Web Services Region. Performance Insights gathers metrics from this data source. To use an Amazon RDS DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VWZ.

    " }, "Metrics":{ @@ -605,7 +1001,7 @@ "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics.

    " }, "Identifier":{ - "shape":"RequestString", + "shape":"IdentifierString", "documentation":"

    An immutable identifier for a data source that is unique within an Amazon Web Services Region. Performance Insights gathers metrics from this data source. To use an Amazon RDS DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VWZ.

    " }, "MetricTypes":{ @@ -635,6 +1031,81 @@ } } }, + "ListPerformanceAnalysisReportsRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "Identifier" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics. Valid value is RDS.

    " + }, + "Identifier":{ + "shape":"IdentifierString", + "documentation":"

    An immutable identifier for a data source that is unique for an Amazon Web Services Region. Performance Insights gathers metrics from this data source. In the console, the identifier is shown as ResourceID. When you call DescribeDBInstances, the identifier is returned as DbiResourceId.

    To use a DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VW2X.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxResults.

    " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of items to return in the response. If more items exist than the specified MaxResults value, a pagination token is included in the response so that the remaining results can be retrieved.

    " + }, + "ListTags":{ + "shape":"Boolean", + "documentation":"

    Specifies whether or not to include the list of tags in the response.

    " + } + } + }, + "ListPerformanceAnalysisReportsResponse":{ + "type":"structure", + "members":{ + "AnalysisReports":{ + "shape":"AnalysisReportSummaryList", + "documentation":"

    List of reports including the report identifier, start and end time, creation time, and status.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the token, up to the value specified by MaxResults.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "ResourceARN" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    List the tags for the Amazon Web Services service for which Performance Insights returns metrics. Valid value is RDS.

    " + }, + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    Lists all the tags for the Amazon RDS Performance Insights resource. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

    The metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    " + } + } + }, + "MarkdownString":{ + "type":"string", + "max":8000, + "min":0, + "pattern":"(.|\\n)*", + "sensitive":true + }, "MaxResults":{ "type":"integer", "max":25, @@ -718,7 +1189,7 @@ "type":"string", "max":8192, "min":1, - "pattern":"[\\s\\S]*" + "pattern":"^[a-zA-Z0-9_=-]+$" }, "NotAuthorizedException":{ "type":"structure", @@ -728,6 +1199,28 @@ "documentation":"

    The user is not authorized to perform this request.

    ", "exception":true }, + "PerformanceInsightsMetric":{ + "type":"structure", + "members":{ + "Metric":{ + "shape":"DescriptiveString", + "documentation":"

    The Performance Insights metric.

    " + }, + "DisplayName":{ + "shape":"DescriptiveString", + "documentation":"

    The Performance Insights metric name.

    " + }, + "Dimensions":{ + "shape":"DescriptiveMap", + "documentation":"

    A dimension map that contains the dimensions for this partition.

    " + }, + "Value":{ + "shape":"Double", + "documentation":"

    The value of the metric. For example, 9 for db.load.avg.

    " + } + }, + "documentation":"

    This data type helps to determine Performance Insights metric to render for the insight.

    " + }, "PeriodAlignment":{ "type":"string", "enum":[ @@ -735,6 +1228,24 @@ "START_TIME" ] }, + "Recommendation":{ + "type":"structure", + "members":{ + "RecommendationId":{ + "shape":"String", + "documentation":"

    The unique identifier for the recommendation.

    " + }, + "RecommendationDescription":{ + "shape":"MarkdownString", + "documentation":"

    The recommendation details to help resolve the performance issue. For example, Investigate the following SQLs that contributed to 100% of the total DBLoad during that time period: sql-id

    " + } + }, + "documentation":"

    The list of recommendations for the insight.

    " + }, + "RecommendationList":{ + "type":"list", + "member":{"shape":"Recommendation"} + }, "RequestString":{ "type":"string", "max":256, @@ -812,11 +1323,122 @@ "DOCDB" ] }, + "Severity":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH" + ] + }, "String":{ "type":"string", "max":256, "min":0, "pattern":".*\\S.*" + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

    A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with aws: or rds:. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").

    " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

    A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with aws: or rds:. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").

    " + } + }, + "documentation":"

    Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^.*$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "ResourceARN", + "Tags" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    The Amazon Web Services service for which Performance Insights returns metrics. Valid value is RDS.

    " + }, + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon RDS Performance Insights resource that the tags are added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The metadata assigned to an Amazon RDS resource consisting of a key-value pair.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^.*$" + }, + "TextFormat":{ + "type":"string", + "enum":[ + "PLAIN_TEXT", + "MARKDOWN" + ] + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ServiceType", + "ResourceARN", + "TagKeys" + ], + "members":{ + "ServiceType":{ + "shape":"ServiceType", + "documentation":"

    List the tags for the Amazon Web Services service for which Performance Insights returns metrics. Valid value is RDS.

    " + }, + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

    The Amazon RDS Performance Insights resource that the tags are added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN).

    " + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

    The metadata assigned to an Amazon RDS Performance Insights resource consisting of a key-value pair.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } } }, "documentation":"Amazon RDS Performance Insights

    Amazon RDS Performance Insights enables you to monitor and explore different dimensions of database load based on data captured from a running DB instance. The guide provides detailed information about Performance Insights data types, parameters and errors.

    When Performance Insights is enabled, the Amazon RDS Performance Insights API provides visibility into the performance of your DB instance. Amazon CloudWatch provides the authoritative source for Amazon Web Services service-vended monitoring metrics. Performance Insights offers a domain-specific view of DB load.

    DB load is measured as average active sessions. Performance Insights provides the data to API consumers as a two-dimensional time-series dataset. The time dimension provides DB load data for each time point in the queried time range. Each time point decomposes overall load in relation to the requested dimensions, measured at that time point. Examples include SQL, Wait event, User, and Host.

    " diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index dc85d2167d2..78952dc7227 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpoint/src/main/resources/codegen-resources/customization.config b/services/pinpoint/src/main/resources/codegen-resources/customization.config index b77ce5f489b..4d02e0dff44 100644 --- a/services/pinpoint/src/main/resources/codegen-resources/customization.config +++ b/services/pinpoint/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : ["*"], + "excludedSimpleMethods" : ["*"], "renameShapes": { // Do not keep adding to this list. Require the service team to name enums like they're naming their shapes. "__EndpointTypesElement": "EndpointTypesElement" diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 57efc97b350..fd2072bed0f 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointemail/src/main/resources/codegen-resources/customization.config b/services/pinpointemail/src/main/resources/codegen-resources/customization.config index 95538dcb362..6bef416549e 100644 --- a/services/pinpointemail/src/main/resources/codegen-resources/customization.config +++ b/services/pinpointemail/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "getAccount", "getDeliverabilityDashboardOptions" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "getDedicatedIps", "listConfigurationSets", "listDedicatedIpPools", diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index 1661eb0a551..d8d1fa2dca4 100644 
--- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config b/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config index 03992c18dda..7bd34e37614 100644 --- a/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config +++ b/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listConfigurationSets" ] } diff --git a/services/pinpointsmsvoicev2/pom.xml b/services/pinpointsmsvoicev2/pom.xml index 84ac02000bb..492acd74327 100644 --- a/services/pinpointsmsvoicev2/pom.xml +++ b/services/pinpointsmsvoicev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT pinpointsmsvoicev2 AWS Java SDK :: Services :: Pinpoint SMS Voice V2 diff --git a/services/pipes/pom.xml b/services/pipes/pom.xml index 212cf833281..2b04b05260d 100644 --- a/services/pipes/pom.xml +++ b/services/pipes/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT pipes AWS Java SDK :: Services :: Pipes diff --git a/services/polly/pom.xml b/services/polly/pom.xml index a021f772989..3e39b8e79d7 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/polly/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/polly/src/main/resources/codegen-resources/endpoint-rule-set.json index 52b2277d14f..c25e9e1b918 100644 --- a/services/polly/src/main/resources/codegen-resources/endpoint-rule-set.json +++ 
b/services/polly/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": 
"booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://polly-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://polly-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://polly-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": 
"https://polly-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://polly.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://polly.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://polly.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://polly.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } 
] } \ No newline at end of file diff --git a/services/polly/src/main/resources/codegen-resources/service-2.json b/services/polly/src/main/resources/codegen-resources/service-2.json index 1b35df9150e..a175bc3b5b8 100644 --- a/services/polly/src/main/resources/codegen-resources/service-2.json +++ b/services/polly/src/main/resources/codegen-resources/service-2.json @@ -1108,7 +1108,8 @@ "Niamh", "Sofie", "Lisa", - "Isabelle" + "Isabelle", + "Zayd" ] }, "VoiceList":{ diff --git a/services/pom.xml b/services/pom.xml index 3ffc799d0be..06482078b59 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT services AWS Java SDK :: Services diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index da21d3eef28..40c82f6dae3 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 pricing diff --git a/services/pricing/src/main/resources/codegen-resources/customization.config b/services/pricing/src/main/resources/codegen-resources/customization.config index 6aad4b5fe48..55e49fef5fa 100644 --- a/services/pricing/src/main/resources/codegen-resources/customization.config +++ b/services/pricing/src/main/resources/codegen-resources/customization.config @@ -2,7 +2,7 @@ "verifiedSimpleMethods": [ "describeServices" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "getProducts" ] } diff --git a/services/privatenetworks/pom.xml b/services/privatenetworks/pom.xml index b54ddebbd0b..2354bb30709 100644 --- a/services/privatenetworks/pom.xml +++ b/services/privatenetworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT privatenetworks AWS Java SDK :: Services :: Private Networks diff --git a/services/proton/pom.xml b/services/proton/pom.xml index 44b0a82984d..c648b24a626 100644 --- a/services/proton/pom.xml +++ 
b/services/proton/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT proton AWS Java SDK :: Services :: Proton diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index 3deef7058ea..4ae6e22e6af 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index 04225cd2f4a..1a1a6fedaa0 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index b92d96a71ee..8095f873082 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json index e5ce3441c3c..512e1e44b04 100644 --- a/services/quicksight/src/main/resources/codegen-resources/paginators-1.json +++ b/services/quicksight/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,17 @@ { "pagination": { + "DescribeFolderPermissions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Permissions" + }, + "DescribeFolderResolvedPermissions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Permissions" + }, "ListAnalyses": { "input_token": "NextToken", "output_token": "NextToken", @@ -42,6 +54,18 @@ "limit_key": "MaxResults", "result_key": "DataSources" }, + "ListFolderMembers": { + "input_token": 
"NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FolderMemberList" + }, + "ListFolders": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FolderSummaryList" + }, "ListGroupMemberships": { "input_token": "NextToken", "output_token": "NextToken", @@ -154,6 +178,12 @@ "limit_key": "MaxResults", "result_key": "DataSourceSummaries" }, + "SearchFolders": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "FolderSummaryList" + }, "SearchGroups": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json b/services/quicksight/src/main/resources/codegen-resources/service-2.json index adde7fcdf86..c30dad216b4 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -1230,6 +1230,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], @@ -1248,6 +1249,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], @@ -2442,7 +2444,7 @@ {"shape":"UnsupportedPricingPlanException"}, {"shape":"InternalFailureException"} ], - "documentation":"

    Starts an asynchronous job that generates a dashboard snapshot. You can request one of the following format configurations per API call.

    • 1 paginated PDF

    • 5 CSVs

    Poll job descriptions with a DescribeDashboardSnapshotJob API call. Once the job succeeds, use the DescribeDashboardSnapshotJobResult API to obtain the download URIs that the job generates.

    " + "documentation":"

    Starts an asynchronous job that generates a dashboard snapshot. You can request one of the following format configurations per API call.

    • 1 paginated PDF

    • 1 Excel workbook

    • 5 CSVs

    Poll job descriptions with a DescribeDashboardSnapshotJob API call. Once the job succeeds, use the DescribeDashboardSnapshotJobResult API to obtain the download URIs that the job generates.

    " }, "TagResource":{ "name":"TagResource", @@ -3225,6 +3227,12 @@ "min":1, "pattern":"[\\w\\-]+|(\\$LATEST)|(\\$PUBLISHED)" }, + "AllSheetsFilterScopeConfiguration":{ + "type":"structure", + "members":{ + }, + "documentation":"

    The configuration for applying a filter to all sheets. You can apply this filter to all visuals on every sheet.

    This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

    " + }, "AmazonElasticsearchParameters":{ "type":"structure", "required":["Domain"], @@ -6907,13 +6915,13 @@ }, "MemberId":{ "shape":"RestrictiveResourceId", - "documentation":"

    The ID of the asset (the dashboard, analysis, or dataset).

    ", + "documentation":"

    The ID of the asset that you want to add to the folder.

    ", "location":"uri", "locationName":"MemberId" }, "MemberType":{ "shape":"MemberType", - "documentation":"

    The type of the member, including DASHBOARD, ANALYSIS, and DATASET.

    ", + "documentation":"

    The member type of the asset that you want to add to a folder.

    ", "location":"uri", "locationName":"MemberType" } @@ -6974,6 +6982,10 @@ "Tags":{ "shape":"TagList", "documentation":"

    Tags for the folder.

    " + }, + "SharingModel":{ + "shape":"SharingModel", + "documentation":"

    An optional parameter that determines the sharing scope of the folder. The default value for this parameter is ACCOUNT.

    " } } }, @@ -10428,13 +10440,13 @@ }, "MemberId":{ "shape":"RestrictiveResourceId", - "documentation":"

    The ID of the asset (the dashboard, analysis, or dataset) that you want to delete.

    ", + "documentation":"

    The ID of the asset that you want to delete.

    ", "location":"uri", "locationName":"MemberId" }, "MemberType":{ "shape":"MemberType", - "documentation":"

    The type of the member, including DASHBOARD, ANALYSIS, and DATASET

    ", + "documentation":"

    The member type of the asset that you want to delete from a folder.

    ", "location":"uri", "locationName":"MemberType" } @@ -12124,6 +12136,25 @@ "documentation":"

    The ID of the folder.

    ", "location":"uri", "locationName":"FolderId" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

    The namespace of the folder whose permissions you want described.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per request.

    ", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A pagination token for the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" } } }, @@ -12150,6 +12181,10 @@ "RequestId":{ "shape":"String", "documentation":"

    The Amazon Web Services request ID for this operation.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The pagination token for the next set of results, or null if there are no more results.

    " } } }, @@ -12192,6 +12227,25 @@ "documentation":"

    The ID of the folder.

    ", "location":"uri", "locationName":"FolderId" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

    The namespace of the folder whose permissions you want described.

    ", + "location":"querystring", + "locationName":"namespace" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to be returned per request.

    ", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A pagination token for the next set of results.

    ", + "location":"querystring", + "locationName":"next-token" } } }, @@ -12218,6 +12272,10 @@ "RequestId":{ "shape":"String", "documentation":"

    The Amazon Web Services request ID for this operation.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    A pagination token for the next set of results, or null if there are no more results.

    " } } }, @@ -14267,6 +14325,10 @@ "SelectedSheets":{ "shape":"SelectedSheetsFilterScopeConfiguration", "documentation":"

    The configuration for applying a filter to specific sheets.

    " + }, + "AllSheets":{ + "shape":"AllSheetsFilterScopeConfiguration", + "documentation":"

    The configuration for applying a filter to all sheets.

    " } }, "documentation":"

    The scope configuration for a FilterGroup.

    This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

    " @@ -14427,6 +14489,10 @@ "LastUpdatedTime":{ "shape":"Timestamp", "documentation":"

    The time that the folder was last updated.

    " + }, + "SharingModel":{ + "shape":"SharingModel", + "documentation":"

    The sharing scope of the folder.

    " } }, "documentation":"

    A folder in Amazon QuickSight.

    " @@ -14521,6 +14587,10 @@ "LastUpdatedTime":{ "shape":"Timestamp", "documentation":"

    The time that the folder was last updated.

    " + }, + "SharingModel":{ + "shape":"SharingModel", + "documentation":"

    The sharing scope of the folder.

    " } }, "documentation":"

    A summary of information about an existing Amazon QuickSight folder.

    " @@ -14618,10 +14688,7 @@ }, "ForecastComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -15938,10 +16005,7 @@ }, "GrowthRateComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -19269,7 +19333,6 @@ "type":"structure", "required":[ "ComputationId", - "Time", "Type" ], "members":{ @@ -19354,12 +19417,7 @@ }, "MetricComparisonComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time", - "FromValue", - "TargetValue" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -20636,10 +20694,7 @@ }, "PeriodOverPeriodComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -20662,10 +20717,7 @@ }, "PeriodToDateComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Time" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -23883,6 +23935,13 @@ }, "documentation":"

    The shape conditional formatting of a filled map visual.

    " }, + "SharingModel":{ + "type":"string", + "enum":[ + "ACCOUNT", + "NAMESPACE" + ] + }, "Sheet":{ "type":"structure", "members":{ @@ -24398,11 +24457,11 @@ "members":{ "SheetSelections":{ "shape":"SnapshotFileSheetSelectionList", - "documentation":"

    A list of SnapshotFileSheetSelection objects that contain information on the dashboard sheet that is exported. These objects provide information about the snapshot artifacts that are generated during the job. This structure can hold a maximum of 5 CSV configurations or 1 configuration for PDF.

    " + "documentation":"

    A list of SnapshotFileSheetSelection objects that contain information on the dashboard sheet that is exported. These objects provide information about the snapshot artifacts that are generated during the job. This structure can hold a maximum of 5 CSV configurations, 5 Excel configurations, or 1 configuration for PDF.

    " }, "FormatType":{ "shape":"SnapshotFileFormatType", - "documentation":"

    The format of the snapshot file to be generated. You can choose between CSV or PDF.

    " + "documentation":"

    The format of the snapshot file to be generated. You can choose between CSV, Excel, or PDF.

    " } }, "documentation":"

    A structure that contains the information for the snapshot that you want to generate. This information is provided by you when you start a new snapshot job.

    " @@ -24411,7 +24470,8 @@ "type":"string", "enum":[ "CSV", - "PDF" + "PDF", + "EXCEL" ] }, "SnapshotFileGroup":{ @@ -24427,7 +24487,7 @@ "SnapshotFileGroupList":{ "type":"list", "member":{"shape":"SnapshotFileGroup"}, - "max":6, + "max":7, "min":1 }, "SnapshotFileList":{ @@ -24445,15 +24505,15 @@ "members":{ "SheetId":{ "shape":"ShortRestrictiveResourceId", - "documentation":"

    The sheet ID of the dashboard to generate the snapshot artifact from. This value is required for CSV and PDF format types.

    " + "documentation":"

    The sheet ID of the dashboard to generate the snapshot artifact from. This value is required for CSV, Excel, and PDF format types.

    " }, "SelectionScope":{ "shape":"SnapshotFileSheetSelectionScope", - "documentation":"

    The selection scope of the visuals on a sheet of a dashboard that you are generating a snapshot of. You can choose one of the following options.

    • ALL_VISUALS - Selects all visuals that are on the sheet. This value is required if the snapshot is a PDF.

    • SELECTED_VISUALS - Select the visual that you want to add to the snapshot. This value is required if the snapshot is a CSV.

    " + "documentation":"

    The selection scope of the visuals on a sheet of a dashboard that you are generating a snapshot of. You can choose one of the following options.

    • ALL_VISUALS - Selects all visuals that are on the sheet. This value is required if the snapshot is a PDF.

    • SELECTED_VISUALS - Select the visual that you want to add to the snapshot. This value is required if the snapshot is a CSV or Excel workbook.

    " }, "VisualIds":{ "shape":"SnapshotFileSheetSelectionVisualIdList", - "documentation":"

    A list of visual IDs that are located in the selected sheet. This structure supports tables and pivot tables. This structure is required if you are generating a CSV. You can add a maximum of 1 visual ID to this structure.

    " + "documentation":"

    A structure that lists the IDs of the visuals in the selected sheet. Supported visual types are table and pivot table visuals. This value is required if you are generating a CSV or Excel workbook. This value supports a maximum of 1 visual ID for CSV and 5 visual IDs across up to 5 sheet selections for Excel. If you are generating an Excel workbook, the order of the visual IDs provided in this structure determines the order of the worksheets in the Excel file.

    " } }, "documentation":"

    A structure that contains information that identifies the snapshot that needs to be generated.

    " @@ -24461,7 +24521,7 @@ "SnapshotFileSheetSelectionList":{ "type":"list", "member":{"shape":"SnapshotFileSheetSelection"}, - "max":1, + "max":5, "min":1 }, "SnapshotFileSheetSelectionScope":{ @@ -24474,7 +24534,7 @@ "SnapshotFileSheetSelectionVisualIdList":{ "type":"list", "member":{"shape":"ShortRestrictiveResourceId"}, - "max":1, + "max":5, "min":1 }, "SnapshotJobErrorInfo":{ @@ -25425,14 +25485,24 @@ "members":{ "SelectedFieldOptions":{ "shape":"TableFieldOptionList", - "documentation":"

    The selected field options for the table field options.

    " + "documentation":"

    The field options to be configured to a table.

    " }, "Order":{ "shape":"FieldOrderList", - "documentation":"

    The order of field IDs of the field options for a table visual.

    " + "documentation":"

    The order of the field IDs that are configured as field options for a table visual.

    " + }, + "PinnedFieldOptions":{ + "shape":"TablePinnedFieldOptions", + "documentation":"

    The settings for the pinned columns of a table visual.

    " } }, - "documentation":"

    The field options for a table visual.

    " + "documentation":"

    The field options of a table visual.

    " + }, + "TableFieldOrderList":{ + "type":"list", + "member":{"shape":"FieldId"}, + "documentation":"

    A list of table field IDs.

    ", + "max":201 }, "TableFieldURLConfiguration":{ "type":"structure", @@ -25520,6 +25590,16 @@ }, "documentation":"

    The paginated report options for a table visual.

    " }, + "TablePinnedFieldOptions":{ + "type":"structure", + "members":{ + "PinnedLeftFields":{ + "shape":"TableFieldOrderList", + "documentation":"

    A list of columns to be pinned to the left of a table visual.

    " + } + }, + "documentation":"

    The settings for the pinned columns of a table visual.

    " + }, "TableRowConditionalFormatting":{ "type":"structure", "members":{ @@ -26660,8 +26740,6 @@ "type":"structure", "required":[ "ComputationId", - "Time", - "Category", "Type" ], "members":{ @@ -26709,7 +26787,6 @@ "type":"structure", "required":[ "ComputationId", - "Category", "Type" ], "members":{ @@ -27311,10 +27388,7 @@ }, "TotalAggregationComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Value" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", @@ -27690,10 +27764,7 @@ }, "UniqueValuesComputation":{ "type":"structure", - "required":[ - "ComputationId", - "Category" - ], + "required":["ComputationId"], "members":{ "ComputationId":{ "shape":"ShortRestrictiveResourceId", diff --git a/services/ram/pom.xml b/services/ram/pom.xml index 9b04ef32463..a9f9c9022e3 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/rbin/pom.xml b/services/rbin/pom.xml index 86acca2d4ce..79278e14ddd 100644 --- a/services/rbin/pom.xml +++ b/services/rbin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT rbin AWS Java SDK :: Services :: Rbin diff --git a/services/rds/pom.xml b/services/rds/pom.xml index 9ea4980a58c..1e92011f648 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git a/services/rds/src/main/resources/codegen-resources/customization.config b/services/rds/src/main/resources/codegen-resources/customization.config index 547a01d77f0..fa6c19c9353 100644 --- a/services/rds/src/main/resources/codegen-resources/customization.config +++ b/services/rds/src/main/resources/codegen-resources/customization.config @@ -56,7 +56,7 @@ ] } }, - 
"blacklistedSimpleMethods" : ["failoverDBCluster"], + "excludedSimpleMethods" : ["failoverDBCluster"], "deprecatedShapes" : [ "BackupPolicyNotFoundFault" ], diff --git a/services/rds/src/main/resources/codegen-resources/paginators-1.json b/services/rds/src/main/resources/codegen-resources/paginators-1.json index b6db47f81c0..41d01c2eb56 100644 --- a/services/rds/src/main/resources/codegen-resources/paginators-1.json +++ b/services/rds/src/main/resources/codegen-resources/paginators-1.json @@ -12,6 +12,12 @@ "output_token": "Marker", "result_key": "Certificates" }, + "DescribeDBClusterAutomatedBackups": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBClusterAutomatedBackups" + }, "DescribeDBClusterBacktracks": { "input_token": "Marker", "limit_key": "MaxRecords", diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index c50e1eb47d7..188199cd2df 100644 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -1611,7 +1611,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

    Initiates the failover process for an Aurora global database (GlobalCluster).

    A failover for an Aurora global database promotes one of secondary read-only DB clusters to be the primary DB cluster and demotes the primary DB cluster to being a secondary (read-only) DB cluster. In other words, the role of the current primary DB cluster and the selected (target) DB cluster are switched. The selected secondary DB cluster assumes full read/write capabilities for the Aurora global database.

    For more information about failing over an Amazon Aurora global database, see Managed planned failover for Amazon Aurora global databases in the Amazon Aurora User Guide.

    This action applies to GlobalCluster (Aurora global databases) only. Use this action only on healthy Aurora global databases with running Aurora DB clusters and no Region-wide outages, to test disaster recovery scenarios or to reconfigure your Aurora global database topology.

    " + "documentation":"

    Promotes the specified secondary DB cluster to be the primary DB cluster in the global database cluster to fail over or switch over a global database. Switchover operations were previously called \"managed planned failovers.\"

    Although this operation can be used either to fail over or to switch over a global database cluster, its intended use is for global database failover. To switch over a global database cluster, we recommend that you use the SwitchoverGlobalCluster operation instead.

    How you use this operation depends on whether you are failing over or switching over your global database cluster:

    • Failing over - Specify the AllowDataLoss parameter and don't specify the Switchover parameter.

    • Switching over - Specify the Switchover parameter or omit it, but don't specify the AllowDataLoss parameter.

    About failing over and switching over

    While failing over and switching over a global database cluster both change the primary DB cluster, you use these operations for different reasons:

    • Failing over - Use this operation to respond to an unplanned event, such as a Regional disaster in the primary Region. Failing over can result in a loss of write transaction data that wasn't replicated to the chosen secondary before the failover event occurred. However, the recovery process that promotes a DB instance on the chosen secondary DB cluster to be the primary writer DB instance guarantees that the data is in a transactionally consistent state.

      For more information about failing over an Amazon Aurora global database, see Performing managed failovers for Aurora global databases in the Amazon Aurora User Guide.

    • Switching over - Use this operation on a healthy global database cluster for planned events, such as Regional rotation or to fail back to the original primary DB cluster after a failover operation. With this operation, there is no data loss.

      For more information about switching over an Amazon Aurora global database, see Performing switchovers for Aurora global databases in the Amazon Aurora User Guide.

    " }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1986,7 +1986,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"InvalidDBInstanceStateFault"} ], - "documentation":"

    Modifies a setting for an Amazon Aurora global cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    This operation only applies to Aurora global database clusters.

    " + "documentation":"

    Modifies a setting for an Amazon Aurora global database cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide.

    This operation only applies to Aurora global database clusters.

    " }, "ModifyOptionGroup":{ "name":"ModifyOptionGroup", @@ -2657,6 +2657,25 @@ ], "documentation":"

    Switches over a blue/green deployment.

    Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment.

    For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.

    " }, + "SwitchoverGlobalCluster":{ + "name":"SwitchoverGlobalCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SwitchoverGlobalClusterMessage"}, + "output":{ + "shape":"SwitchoverGlobalClusterResult", + "resultWrapper":"SwitchoverGlobalClusterResult" + }, + "errors":[ + {"shape":"GlobalClusterNotFoundFault"}, + {"shape":"InvalidGlobalClusterStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

    Switches over the specified secondary DB cluster to be the new primary DB cluster in the global database cluster. Switchover operations were previously called \"managed planned failovers.\"

    Aurora promotes the specified secondary cluster to assume full read/write capabilities and demotes the current primary cluster to a secondary (read-only) cluster, maintaining the original replication topology. All secondary clusters are synchronized with the primary at the beginning of the process so the new primary continues operations for the Aurora global database without losing any data. Your database is unavailable for a short time while the primary and selected secondary clusters are assuming their new roles. For more information about switching over an Aurora global database, see Performing switchovers for Amazon Aurora global databases in the Amazon Aurora User Guide.

    This operation is intended for controlled environments, for operations such as \"regional rotation\" or to fall back to the original primary after a global database failover.

    " + }, "SwitchoverReadReplica":{ "name":"SwitchoverReadReplica", "http":{ @@ -3661,7 +3680,15 @@ "shape":"CustomDBEngineVersionManifest", "documentation":"

    The CEV manifest, which is a JSON document that describes the installation .zip files stored in Amazon S3. Specify the name/value pairs in a file or a quoted string. RDS Custom applies the patches in the order in which they are listed.

    The following JSON fields are valid:

    MediaImportTemplateVersion

    Version of the CEV manifest. The date is in the format YYYY-MM-DD.

    databaseInstallationFileNames

    Ordered list of installation files for the CEV.

    opatchFileNames

    Ordered list of OPatch installers used for the Oracle DB engine.

    psuRuPatchFileNames

    The PSU and RU patches for this CEV.

    OtherPatchFileNames

    The patches that are not in the list of PSU and RU patches. Amazon RDS applies these patches after applying the PSU and RU patches.

    For more information, see Creating the CEV manifest in the Amazon RDS User Guide.

    " }, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "SourceCustomDbEngineVersionIdentifier":{ + "shape":"String255", + "documentation":"

    Reserved for future use.

    " + }, + "UseAwsProvidedLatestImage":{ + "shape":"BooleanOptional", + "documentation":"

    Reserved for future use.

    " + } } }, "CreateDBClusterEndpointMessage":{ @@ -3847,7 +3874,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

    The storage type to associate with the DB cluster.

    For information on storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB clusters. For information on storage types for Multi-AZ DB clusters, see Settings for creating Multi-AZ DB clusters.

    This setting is required to create a Multi-AZ DB cluster.

    When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

    Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

    Valid Values:

    • Aurora DB clusters - aurora | aurora-iopt1

    • Multi-AZ DB clusters - io1

    Default:

    • Aurora DB clusters - aurora

    • Multi-AZ DB clusters - io1

    " + "documentation":"

    The storage type to associate with the DB cluster.

    For information on storage types for Aurora DB clusters, see Storage configurations for Amazon Aurora DB clusters. For information on storage types for Multi-AZ DB clusters, see Settings for creating Multi-AZ DB clusters.

    This setting is required to create a Multi-AZ DB cluster.

    When specified for a Multi-AZ DB cluster, a value for the Iops parameter is required.

    Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

    Valid Values:

    • Aurora DB clusters - aurora | aurora-iopt1

    • Multi-AZ DB clusters - io1

    Default:

    • Aurora DB clusters - aurora

    • Multi-AZ DB clusters - io1

    When you create an Aurora DB cluster with the storage type set to aurora-iopt1, the storage type is returned in the response. The storage type isn't returned when you set it to aurora.

    " }, "Iops":{ "shape":"IntegerOptional", @@ -9730,11 +9757,19 @@ "members":{ "GlobalClusterIdentifier":{ "shape":"GlobalClusterIdentifier", - "documentation":"

    Identifier of the Aurora global database (GlobalCluster) that should be failed over. The identifier is the unique key assigned by the user when the Aurora global database was created. In other words, it's the name of the Aurora global database that you want to fail over.

    Constraints:

    • Must match the identifier of an existing GlobalCluster (Aurora global database).

    " + "documentation":"

    The identifier of the global database cluster (Aurora global database) this operation should apply to. The identifier is the unique key assigned by the user when the Aurora global database is created. In other words, it's the name of the Aurora global database.

    Constraints:

    • Must match the identifier of an existing global database cluster.

    " }, "TargetDbClusterIdentifier":{ "shape":"DBClusterIdentifier", - "documentation":"

    Identifier of the secondary Aurora DB cluster that you want to promote to primary for the Aurora global database (GlobalCluster.) Use the Amazon Resource Name (ARN) for the identifier so that Aurora can locate the cluster in its Amazon Web Services Region.

    " + "documentation":"

    The identifier of the secondary Aurora DB cluster that you want to promote to the primary for the global database cluster. Use the Amazon Resource Name (ARN) for the identifier so that Aurora can locate the cluster in its Amazon Web Services Region.

    " + }, + "AllowDataLoss":{ + "shape":"BooleanOptional", + "documentation":"

    Specifies whether to allow data loss for this global database cluster operation. Allowing data loss triggers a global failover operation.

    If you don't specify AllowDataLoss, the global database cluster operation defaults to a switchover.

    Constraints:

    • Can't be specified together with the Switchover parameter.

    " + }, + "Switchover":{ + "shape":"BooleanOptional", + "documentation":"

    Specifies whether to switch over this global database cluster.

    Constraints:

    • Can't be specified together with the AllowDataLoss parameter.

    " } } }, @@ -9749,7 +9784,7 @@ "members":{ "Status":{ "shape":"FailoverStatus", - "documentation":"

    The current status of the Aurora global database (GlobalCluster). Possible values are as follows:

    • pending – A request to fail over the Aurora global database (GlobalCluster) has been received by the service. The GlobalCluster's primary DB cluster and the specified secondary DB cluster are being verified before the failover process can start.

    • failing-over – This status covers the range of Aurora internal operations that take place during the failover process, such as demoting the primary Aurora DB cluster, promoting the secondary Aurora DB, and synchronizing replicas.

    • cancelling – The request to fail over the Aurora global database (GlobalCluster) was cancelled and the primary Aurora DB cluster and the selected secondary Aurora DB cluster are returning to their previous states.

    " + "documentation":"

    The current status of the global cluster. Possible values are as follows:

    • pending – The service received a request to switch over or fail over the global cluster. The global cluster's primary DB cluster and the specified secondary DB cluster are being verified before the operation starts.

    • failing-over – This status covers the range of Aurora internal operations that take place during the switchover or failover process, such as demoting the primary Aurora DB cluster, promoting the secondary Aurora DB cluster, and synchronizing replicas.

    • cancelling – The request to switch over or fail over the global cluster was cancelled and the primary Aurora DB cluster and the selected secondary Aurora DB cluster are returning to their previous states.

    " }, "FromDbClusterArn":{ "shape":"String", @@ -9758,9 +9793,13 @@ "ToDbClusterArn":{ "shape":"String", "documentation":"

    The Amazon Resource Name (ARN) of the Aurora DB cluster that is currently being promoted, and which is associated with this state.

    " + }, + "IsDataLossAllowed":{ + "shape":"Boolean", + "documentation":"

    Indicates whether the operation is a global switchover or a global failover. If data loss is allowed, then the operation is a global failover. Otherwise, it's a switchover.

    " } }, - "documentation":"

    Contains the state of scheduled or in-process failover operations on an Aurora global database (GlobalCluster). This Data type is empty unless a failover operation is scheduled or is currently underway on the Aurora global database.

    ", + "documentation":"

    Contains the state of scheduled or in-process operations on a global cluster (Aurora global database). This data type is empty unless a switchover or failover operation is scheduled or is in progress on the Aurora global database.

    ", "wrapper":true }, "FailoverStatus":{ @@ -9852,7 +9891,7 @@ }, "FailoverState":{ "shape":"FailoverState", - "documentation":"

    A data object containing all properties for the current state of an in-process or pending failover process for this Aurora global database. This object is empty unless the FailoverGlobalCluster API operation has been called on this Aurora global database (GlobalCluster).

    " + "documentation":"

    A data object containing all properties for the current state of an in-process or pending switchover or failover process for this global cluster (Aurora global database). This object is empty unless the SwitchoverGlobalCluster or FailoverGlobalCluster operation was called on this global cluster.

    " } }, "documentation":"

    A data type representing an Aurora global database.

    ", @@ -9888,22 +9927,26 @@ "members":{ "DBClusterArn":{ "shape":"String", - "documentation":"

    The Amazon Resource Name (ARN) for each Aurora cluster.

    " + "documentation":"

    The Amazon Resource Name (ARN) for each Aurora DB cluster in the global cluster.

    " }, "Readers":{ "shape":"ReadersArnList", - "documentation":"

    The Amazon Resource Name (ARN) for each read-only secondary cluster associated with the Aurora global database.

    " + "documentation":"

    The Amazon Resource Name (ARN) for each read-only secondary cluster associated with the global cluster.

    " }, "IsWriter":{ "shape":"Boolean", - "documentation":"

    Specifies whether the Aurora cluster is the primary cluster (that is, has read-write capability) for the Aurora global database with which it is associated.

    " + "documentation":"

    Specifies whether the Aurora DB cluster is the primary cluster (that is, has read-write capability) for the global cluster with which it is associated.

    " }, "GlobalWriteForwardingStatus":{ "shape":"WriteForwardingStatus", - "documentation":"

    Specifies whether a secondary cluster in an Aurora global database has write forwarding enabled, not enabled, or is in the process of enabling it.

    " + "documentation":"

    Specifies whether a secondary cluster in the global cluster has write forwarding enabled, not enabled, or is in the process of enabling it.

    " + }, + "SynchronizationStatus":{ + "shape":"GlobalClusterMemberSynchronizationStatus", + "documentation":"

    The status of synchronization of each Aurora DB cluster in the global cluster.

    " } }, - "documentation":"

    A data structure with information about any primary and secondary clusters associated with an Aurora global database.

    ", + "documentation":"

    A data structure with information about any primary and secondary clusters associated with a global cluster (Aurora global database).

    ", "wrapper":true }, "GlobalClusterMemberList":{ @@ -9913,6 +9956,13 @@ "locationName":"GlobalClusterMember" } }, + "GlobalClusterMemberSynchronizationStatus":{ + "type":"string", + "enum":[ + "connected", + "pending-resync" + ] + }, "GlobalClusterNotFoundFault":{ "type":"structure", "members":{ @@ -12930,7 +12980,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

    The version number of the database engine to use.

    To list all of the available engine versions for aurora-mysql (Aurora MySQL), use the following command:

    aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

    Aurora MySQL

    Examples: 5.7.mysql_aurora.2.07.1, 8.0.mysql_aurora.3.02.0

    " + "documentation":"

    The version number of the database engine to use.

    To list all of the available engine versions for aurora-mysql (Aurora MySQL), use the following command:

    aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

    Aurora MySQL

    Examples: 5.7.mysql_aurora.2.12.0, 8.0.mysql_aurora.3.04.0

    " }, "Port":{ "shape":"IntegerOptional", @@ -12975,7 +13025,7 @@ }, "SourceEngineVersion":{ "shape":"String", - "documentation":"

    The version of the database that the backup files were created from.

    MySQL versions 5.5, 5.6, and 5.7 are supported.

    Example: 5.6.40, 5.7.28

    " + "documentation":"

    The version of the database that the backup files were created from.

    MySQL versions 5.7 and 8.0 are supported.

    Example: 5.7.40, 8.0.28

    " }, "S3BucketName":{ "shape":"String", @@ -14565,6 +14615,29 @@ "member":{"shape":"SwitchoverDetail"} }, "SwitchoverDetailStatus":{"type":"string"}, + "SwitchoverGlobalClusterMessage":{ + "type":"structure", + "required":[ + "GlobalClusterIdentifier", + "TargetDbClusterIdentifier" + ], + "members":{ + "GlobalClusterIdentifier":{ + "shape":"GlobalClusterIdentifier", + "documentation":"

    The identifier of the global database cluster to switch over. This parameter isn't case-sensitive.

    Constraints:

    • Must match the identifier of an existing global database cluster (Aurora global database).

    " + }, + "TargetDbClusterIdentifier":{ + "shape":"DBClusterIdentifier", + "documentation":"

    The identifier of the secondary Aurora DB cluster to promote to the new primary for the global database cluster. Use the Amazon Resource Name (ARN) for the identifier so that Aurora can locate the cluster in its Amazon Web Services Region.

    " + } + } + }, + "SwitchoverGlobalClusterResult":{ + "type":"structure", + "members":{ + "GlobalCluster":{"shape":"GlobalCluster"} + } + }, "SwitchoverReadReplicaMessage":{ "type":"structure", "required":["DBInstanceIdentifier"], diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index 6259decdc6d..9263dac5d50 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index d1a33631b3d..6d997f1fe35 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshift/src/main/resources/codegen-resources/customization.config b/services/redshift/src/main/resources/codegen-resources/customization.config index a15637c838c..635247225c3 100644 --- a/services/redshift/src/main/resources/codegen-resources/customization.config +++ b/services/redshift/src/main/resources/codegen-resources/customization.config @@ -21,7 +21,7 @@ "describeStorage", "describeTags" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeTableRestoreStatus", "describeClusterSecurityGroups" ] diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index 2a37d4d50ab..09c6c22effd 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/redshiftserverless/pom.xml b/services/redshiftserverless/pom.xml index 0795179ed93..33203704027 100644 --- a/services/redshiftserverless/pom.xml +++ b/services/redshiftserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT redshiftserverless AWS Java SDK :: Services :: Redshift Serverless diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index f5864f27140..8e13fbcab66 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/customization.config b/services/rekognition/src/main/resources/codegen-resources/customization.config index 77a06980da8..ece3c940cc7 100644 --- a/services/rekognition/src/main/resources/codegen-resources/customization.config +++ b/services/rekognition/src/main/resources/codegen-resources/customization.config @@ -3,7 +3,7 @@ "listCollections", "listStreamProcessors" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeTableRestoreStatus", "describeClusterSecurityGroups" ] diff --git a/services/resiliencehub/pom.xml b/services/resiliencehub/pom.xml index 33b5841bb5d..d94762b5017 100644 --- a/services/resiliencehub/pom.xml +++ b/services/resiliencehub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT resiliencehub AWS Java SDK :: Services :: Resiliencehub diff --git a/services/resourceexplorer2/pom.xml b/services/resourceexplorer2/pom.xml index 22587e02f75..d6a03dacfee 100644 --- a/services/resourceexplorer2/pom.xml +++ b/services/resourceexplorer2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT resourceexplorer2 AWS Java SDK :: Services :: Resource Explorer 2 diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index 651a5d70f92..0a3c953f9e4 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 
resourcegroups diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index d411662b193..4c134f081ce 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index 838cc84847e..5fc66fadba1 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/rolesanywhere/pom.xml b/services/rolesanywhere/pom.xml index 2dc9be0a4a6..b81046fff25 100644 --- a/services/rolesanywhere/pom.xml +++ b/services/rolesanywhere/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT rolesanywhere AWS Java SDK :: Services :: Roles Anywhere diff --git a/services/route53/pom.xml b/services/route53/pom.xml index 196f1971c38..d88a66b57d8 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index 81cf9e46861..24994b06b19 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53domains/src/main/resources/codegen-resources/customization.config b/services/route53domains/src/main/resources/codegen-resources/customization.config index df62beb2be1..cbc750ecf2c 100644 --- 
a/services/route53domains/src/main/resources/codegen-resources/customization.config +++ b/services/route53domains/src/main/resources/codegen-resources/customization.config @@ -4,7 +4,7 @@ "listDomains", "listOperations" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "viewBilling", "getContactReachabilityStatus" ] diff --git a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json index 3ccf51cbf5b..3f5f8783314 100644 --- a/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/route53domains/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": 
"UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://route53domains-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", 
"argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://route53domains-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": 
"https://route53domains.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://route53domains.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://route53domains.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/route53domains/src/main/resources/codegen-resources/service-2.json b/services/route53domains/src/main/resources/codegen-resources/service-2.json index 8bb0ddb21ab..db2da8ebd49 100644 --- a/services/route53domains/src/main/resources/codegen-resources/service-2.json +++ b/services/route53domains/src/main/resources/codegen-resources/service-2.json @@ -543,7 +543,7 @@ "documentation":"

    The name of the domain that was specified when another Amazon Web Services account submitted a TransferDomainToAnotherAwsAccount request.

    " }, "Password":{ - "shape":"String", + "shape":"Password", "documentation":"

    The password that was returned by the TransferDomainToAnotherAwsAccount request.

    " } }, @@ -567,7 +567,8 @@ }, "AddressLine":{ "type":"string", - "max":255 + "max":255, + "sensitive":true }, "AssociateDelegationSignerToDomainRequest":{ "type":"structure", @@ -693,13 +694,18 @@ "Transferability":{ "shape":"DomainTransferability", "documentation":"

    A complex type that contains information about whether the specified domain can be transferred to Route 53.

    " + }, + "Message":{ + "shape":"Message", + "documentation":"

    Provides an explanation for when a domain can't be transferred.

    " } }, "documentation":"

    The CheckDomainTransferability response includes the following elements.

    " }, "City":{ "type":"string", - "max":255 + "max":255, + "sensitive":true }, "Consent":{ "type":"structure", @@ -784,11 +790,13 @@ }, "ContactName":{ "type":"string", - "max":255 + "max":255, + "sensitive":true }, "ContactNumber":{ "type":"string", - "max":30 + "max":30, + "sensitive":true }, "ContactType":{ "type":"string", @@ -1054,7 +1062,8 @@ "ZA", "ZM", "ZW" - ] + ], + "sensitive":true }, "Currency":{ "type":"string", @@ -1366,6 +1375,10 @@ "DuplicateRequest":{ "type":"structure", "members":{ + "requestId":{ + "shape":"RequestId", + "documentation":"

    ID of the request operation.

    " + }, "message":{ "shape":"ErrorMessage", "documentation":"

    The request is already in progress for the domain.

    " @@ -1381,7 +1394,8 @@ }, "Email":{ "type":"string", - "max":254 + "max":254, + "sensitive":true }, "EnableDomainAutoRenewRequest":{ "type":"structure", @@ -1830,7 +1844,7 @@ }, "SortOrder":{ "shape":"SortOrder", - "documentation":"

    The sort order ofr returned values, either ascending or descending.

    " + "documentation":"

    The sort order for returned values, either ascending or descending.

    " } }, "documentation":"

    The ListOperations request includes the following elements.

    " @@ -1908,6 +1922,7 @@ }, "documentation":"

    The ListTagsForDomain response includes the following elements.

    " }, + "Message":{"type":"string"}, "Nameserver":{ "type":"structure", "required":["Name"], @@ -2044,6 +2059,10 @@ "type":"integer", "max":100 }, + "Password":{ + "type":"string", + "sensitive":true + }, "Price":{"type":"double"}, "PriceWithCurrency":{ "type":"structure", @@ -2207,6 +2226,7 @@ } } }, + "RequestId":{"type":"string"}, "Reseller":{"type":"string"}, "ResendContactReachabilityEmailRequest":{ "type":"structure", @@ -2292,7 +2312,8 @@ }, "State":{ "type":"string", - "max":255 + "max":255, + "sensitive":true }, "StatusFlag":{ "type":"string", @@ -2451,7 +2472,7 @@ "documentation":"

    Identifier for tracking the progress of the request. To query the operation status, use GetOperationDetail.

    " }, "Password":{ - "shape":"String", + "shape":"Password", "documentation":"

    To finish transferring a domain to another Amazon Web Services account, the account that the domain is being transferred to must submit an AcceptDomainTransferFromAnotherAwsAccount request. The request must include the value of the Password element that was returned in the TransferDomainToAnotherAwsAccount response.

    " } }, @@ -2652,7 +2673,8 @@ }, "ZipCode":{ "type":"string", - "max":255 + "max":255, + "sensitive":true } }, "documentation":"

    Amazon Route 53 API actions let you register domain names and perform related operations.

    " diff --git a/services/route53recoverycluster/pom.xml b/services/route53recoverycluster/pom.xml index 0dd3739e76f..52e364570d9 100644 --- a/services/route53recoverycluster/pom.xml +++ b/services/route53recoverycluster/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT route53recoverycluster AWS Java SDK :: Services :: Route53 Recovery Cluster diff --git a/services/route53recoverycontrolconfig/pom.xml b/services/route53recoverycontrolconfig/pom.xml index 9e7d93e6b95..6ee9de31a2d 100644 --- a/services/route53recoverycontrolconfig/pom.xml +++ b/services/route53recoverycontrolconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT route53recoverycontrolconfig AWS Java SDK :: Services :: Route53 Recovery Control Config diff --git a/services/route53recoveryreadiness/pom.xml b/services/route53recoveryreadiness/pom.xml index f9ec52f370c..b037870c0a2 100644 --- a/services/route53recoveryreadiness/pom.xml +++ b/services/route53recoveryreadiness/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT route53recoveryreadiness AWS Java SDK :: Services :: Route53 Recovery Readiness diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index a628b91cb9b..6b7a614900c 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/route53resolver/src/main/resources/codegen-resources/customization.config b/services/route53resolver/src/main/resources/codegen-resources/customization.config index c06646195ea..daf08da254a 100644 --- a/services/route53resolver/src/main/resources/codegen-resources/customization.config +++ b/services/route53resolver/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - 
"blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listResolverEndpoints", "listResolverRuleAssociations", "listResolverRules" diff --git a/services/rum/pom.xml b/services/rum/pom.xml index 1df461cb60a..e0ab435e092 100644 --- a/services/rum/pom.xml +++ b/services/rum/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT rum AWS Java SDK :: Services :: RUM diff --git a/services/s3/pom.xml b/services/s3/pom.xml index f708f8c4238..b9bc614c6a4 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 @@ -153,6 +153,11 @@ equalsverifier test
    + + com.google.jimfs + jimfs + test + diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java index 1ca5526a1e4..c95d47f8fe5 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java @@ -50,6 +50,7 @@ import software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; @@ -200,6 +201,38 @@ public void getObject_PresignedHttpRequestCanBeInvokedDirectlyBySdk() throws IOE } } + @Test + public void deleteObject_PresignedHttpRequestCanBeInvokedDirectlyBySdk() throws IOException { + String objectKey = generateRandomObjectKey(); + S3TestUtils.addCleanupTask(S3PresignerIntegrationTest.class, + () -> client.deleteObject(r -> r.bucket(testBucket).key(objectKey))); + client.putObject(r -> r.bucket(testBucket).key(objectKey), RequestBody.fromString("DeleteObjectPresignRequestTest")); + + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket(testBucket) + .key(testGetObjectKey) + .requestPayer(RequestPayer.REQUESTER))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + + SdkHttpClient httpClient = 
ApacheHttpClient.builder().build(); // or UrlConnectionHttpClient.builder().build() + + ContentStreamProvider requestPayload = presigned.signedPayload() + .map(SdkBytes::asContentStreamProvider) + .orElse(null); + + HttpExecuteRequest request = HttpExecuteRequest.builder() + .request(presigned.httpRequest()) + .contentStreamProvider(requestPayload) + .build(); + + HttpExecuteResponse response = httpClient.prepareRequest(request).call(); + + assertThat(response.responseBody()).isEmpty(); + assertThat(response.httpResponse().statusCode()).isEqualTo(204); + } + @Test public void putObject_PresignedHttpRequestCanBeInvokedDirectlyBySdk() throws IOException { String objectKey = generateRandomObjectKey(); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolver.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolver.java new file mode 100644 index 00000000000..9fc199175bd --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolver.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; +import software.amazon.awssdk.utils.Validate; + +/** + * Internal utility class to resolve {@link MultipartConfiguration}. + */ +@SdkInternalApi +public final class MultipartConfigurationResolver { + + private static final long DEFAULT_MIN_PART_SIZE = 8L * 1024 * 1024; + private final long minimalPartSizeInBytes; + private final long apiCallBufferSize; + private final long thresholdInBytes; + + public MultipartConfigurationResolver(MultipartConfiguration multipartConfiguration) { + Validate.notNull(multipartConfiguration, "multipartConfiguration"); + this.minimalPartSizeInBytes = Validate.getOrDefault(multipartConfiguration.minimumPartSizeInBytes(), + () -> DEFAULT_MIN_PART_SIZE); + this.apiCallBufferSize = Validate.getOrDefault(multipartConfiguration.apiCallBufferSizeInBytes(), + () -> minimalPartSizeInBytes * 4); + this.thresholdInBytes = Validate.getOrDefault(multipartConfiguration.thresholdInBytes(), () -> minimalPartSizeInBytes); + } + + public long minimalPartSizeInBytes() { + return minimalPartSizeInBytes; + } + + public long thresholdInBytes() { + return thresholdInBytes; + } + + public long apiCallBufferSize() { + return apiCallBufferSize; + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java index 65b26ddec97..8b53099b868 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartS3AsyncClient.java @@ -46,10 +46,6 @@ public final class MultipartS3AsyncClient extends DelegatingS3AsyncClient { private static final 
ApiName USER_AGENT_API_NAME = ApiName.builder().name("hll").version("s3Multipart").build(); - private static final long DEFAULT_MIN_PART_SIZE = 8L * 1024 * 1024; - private static final long DEFAULT_THRESHOLD = 8L * 1024 * 1024; - private static final long DEFAULT_API_CALL_BUFFER_SIZE = DEFAULT_MIN_PART_SIZE * 4; - private final UploadObjectHelper mpuHelper; private final CopyObjectHelper copyObjectHelper; @@ -57,21 +53,13 @@ private MultipartS3AsyncClient(S3AsyncClient delegate, MultipartConfiguration mu super(delegate); MultipartConfiguration validConfiguration = Validate.getOrDefault(multipartConfiguration, MultipartConfiguration.builder()::build); - long minPartSizeInBytes = Validate.getOrDefault(validConfiguration.minimumPartSizeInBytes(), - () -> DEFAULT_MIN_PART_SIZE); - long threshold = Validate.getOrDefault(validConfiguration.thresholdInBytes(), - () -> DEFAULT_THRESHOLD); - long apiCallBufferSizeInBytes = Validate.getOrDefault(validConfiguration.apiCallBufferSizeInBytes(), - () -> computeApiCallBufferSize(validConfiguration)); - mpuHelper = new UploadObjectHelper(delegate, minPartSizeInBytes, threshold, apiCallBufferSizeInBytes); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(validConfiguration); + long minPartSizeInBytes = resolver.minimalPartSizeInBytes(); + long threshold = resolver.thresholdInBytes(); + mpuHelper = new UploadObjectHelper(delegate, resolver); copyObjectHelper = new CopyObjectHelper(delegate, minPartSizeInBytes, threshold); } - private long computeApiCallBufferSize(MultipartConfiguration multipartConfiguration) { - return multipartConfiguration.minimumPartSizeInBytes() != null ? 
multipartConfiguration.minimumPartSizeInBytes() * 4 - : DEFAULT_API_CALL_BUFFER_SIZE; - } - @Override public CompletableFuture putObject(PutObjectRequest putObjectRequest, AsyncRequestBody requestBody) { return mpuHelper.uploadObject(putObjectRequest, requestBody); diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java index 0700e8ade5f..1ca499b57aa 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelper.java @@ -34,30 +34,28 @@ public final class UploadObjectHelper { private final long partSizeInBytes; private final GenericMultipartHelper genericMultipartHelper; - private final long maxMemoryUsageInBytes; + private final long apiCallBufferSize; private final long multipartUploadThresholdInBytes; private final UploadWithKnownContentLengthHelper uploadWithKnownContentLength; private final UploadWithUnknownContentLengthHelper uploadWithUnknownContentLength; public UploadObjectHelper(S3AsyncClient s3AsyncClient, - long partSizeInBytes, - long multipartUploadThresholdInBytes, - long maxMemoryUsageInBytes) { + MultipartConfigurationResolver resolver) { this.s3AsyncClient = s3AsyncClient; - this.partSizeInBytes = partSizeInBytes; + this.partSizeInBytes = resolver.minimalPartSizeInBytes(); this.genericMultipartHelper = new GenericMultipartHelper<>(s3AsyncClient, SdkPojoConversionUtils::toAbortMultipartUploadRequest, SdkPojoConversionUtils::toPutObjectResponse); - this.maxMemoryUsageInBytes = maxMemoryUsageInBytes; - this.multipartUploadThresholdInBytes = multipartUploadThresholdInBytes; + this.apiCallBufferSize = resolver.apiCallBufferSize(); + this.multipartUploadThresholdInBytes = resolver.thresholdInBytes(); this.uploadWithKnownContentLength 
= new UploadWithKnownContentLengthHelper(s3AsyncClient, partSizeInBytes, multipartUploadThresholdInBytes, - maxMemoryUsageInBytes); + apiCallBufferSize); this.uploadWithUnknownContentLength = new UploadWithUnknownContentLengthHelper(s3AsyncClient, partSizeInBytes, multipartUploadThresholdInBytes, - maxMemoryUsageInBytes); + apiCallBufferSize); } public CompletableFuture uploadObject(PutObjectRequest putObjectRequest, diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java index f7d199ac3aa..46caefca8d6 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/multipart/UploadWithKnownContentLengthHelper.java @@ -196,7 +196,9 @@ public void onSubscribe(Subscription s) { returnFuture.whenComplete((r, t) -> { if (t != null) { s.cancel(); - multipartUploadHelper.cancelingOtherOngoingRequests(futures, t); + if (failureActionInitiated.compareAndSet(false, true)) { + multipartUploadHelper.failRequestsElegantly(futures, t, uploadId, returnFuture, putObjectRequest); + } } }); } diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java index ec79bb132ff..ed9fed5f910 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java @@ -74,6 +74,7 @@ import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import 
software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.UploadPartRequest; @@ -81,10 +82,12 @@ import software.amazon.awssdk.services.s3.presigner.model.AbortMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.CompleteMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.CreateMultipartUploadPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.DeleteObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; @@ -93,6 +96,7 @@ import software.amazon.awssdk.services.s3.transform.AbortMultipartUploadRequestMarshaller; import software.amazon.awssdk.services.s3.transform.CompleteMultipartUploadRequestMarshaller; import software.amazon.awssdk.services.s3.transform.CreateMultipartUploadRequestMarshaller; +import software.amazon.awssdk.services.s3.transform.DeleteObjectRequestMarshaller; import software.amazon.awssdk.services.s3.transform.GetObjectRequestMarshaller; import software.amazon.awssdk.services.s3.transform.PutObjectRequestMarshaller; import 
software.amazon.awssdk.services.s3.transform.UploadPartRequestMarshaller; @@ -118,6 +122,7 @@ public final class DefaultS3Presigner extends DefaultSdkPresigner implements S3P private final PutObjectRequestMarshaller putObjectRequestMarshaller; private final CreateMultipartUploadRequestMarshaller createMultipartUploadRequestMarshaller; private final UploadPartRequestMarshaller uploadPartRequestMarshaller; + private final DeleteObjectRequestMarshaller deleteObjectRequestMarshaller; private final CompleteMultipartUploadRequestMarshaller completeMultipartUploadRequestMarshaller; private final AbortMultipartUploadRequestMarshaller abortMultipartUploadRequestMarshaller; private final SdkClientConfiguration clientConfiguration; @@ -172,6 +177,9 @@ private DefaultS3Presigner(Builder b) { // Copied from DefaultS3Client#uploadPart this.uploadPartRequestMarshaller = new UploadPartRequestMarshaller(protocolFactory); + // Copied from DefaultS3Client#deleteObject + this.deleteObjectRequestMarshaller = new DeleteObjectRequestMarshaller(protocolFactory); + // Copied from DefaultS3Client#completeMultipartUpload this.completeMultipartUploadRequestMarshaller = new CompleteMultipartUploadRequestMarshaller(protocolFactory); @@ -247,6 +255,17 @@ public PresignedPutObjectRequest presignPutObject(PutObjectPresignRequest reques .build(); } + @Override + public PresignedDeleteObjectRequest presignDeleteObject(DeleteObjectPresignRequest request) { + return presign(PresignedDeleteObjectRequest.builder(), + request, + request.deleteObjectRequest(), + DeleteObjectRequest.class, + deleteObjectRequestMarshaller::marshall, + "DeleteObject") + .build(); + } + @Override public PresignedCreateMultipartUploadRequest presignCreateMultipartUpload(CreateMultipartUploadPresignRequest request) { return presign(PresignedCreateMultipartUploadRequest.builder(), diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java 
b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java index 28e418974db..be2500703e1 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/multipart/MultipartConfiguration.java @@ -93,13 +93,19 @@ public Long apiCallBufferSizeInBytes() { public interface Builder extends CopyableBuilder { /** - * Configures the minimum number of bytes of the body of the request required for requests to be converted to their - * multipart equivalent. Only taken into account when converting {@code putObject} and {@code copyObject} requests. - * Any request whose size is less than the configured value will not use multipart operation, - * even if multipart is enabled via {@link S3AsyncClientBuilder#multipartEnabled(Boolean)}. + * Configure the size threshold, in bytes, for when to use multipart upload. Uploads/copies over this size will + * automatically use a multipart upload strategy, while uploads/copies smaller than this threshold will use a single + * connection to upload/copy the whole object. + * *

    + * Multipart uploads are easier to recover from and also potentially faster than single part uploads, especially when the + * upload parts can be uploaded in parallel. Because there are additional network API calls, small objects are still + * recommended to use a single connection for the upload. See + * Uploading and copying objects using + * multipart upload. * - * Default value: 8 Mib + *

    + * By default, it is the same as {@link #minimumPartSizeInBytes(Long)}. * * @param thresholdInBytes the value of the threshold to set. * @return an instance of this builder. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java index c2aa3e45740..81a55e7bece 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java @@ -35,16 +35,19 @@ import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.presigner.model.AbortMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.CompleteMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.CreateMultipartUploadPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.DeleteObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import 
software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; @@ -339,6 +342,50 @@ default PresignedPutObjectRequest presignPutObject(Consumer + * Example Usage + * + *

    +     * {@code
    +     *     S3Presigner presigner = ...;
    +     *
    +     *     // Create a DeleteObjectRequest to be pre-signed
    +     *     DeleteObjectRequest deleteObjectRequest = ...;
    +     *
    +     *     // Create a DeleteObjectPresignRequest to specify the signature duration
    +     *     DeleteObjectPresignRequest deleteObjectPresignRequest =
    +     *         DeleteObjectPresignRequest.builder()
    +     *                                   .signatureDuration(Duration.ofMinutes(10))
    +     *                                   .deleteObjectRequest(deleteObjectRequest)
    +     *                                   .build();
    +     *
    +     *     // Generate the presigned request
    +     *     PresignedDeleteObjectRequest presignedDeleteObjectRequest =
    +     *         presigner.presignDeleteObject(deleteObjectPresignRequest);
    +     * }
    +     * 
    + */ + PresignedDeleteObjectRequest presignDeleteObject(DeleteObjectPresignRequest request); + + /** + * Presign a {@link DeleteObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

    + * This is a shorter method of invoking {@link #presignDeleteObject(DeleteObjectPresignRequest)} without needing + * to call {@code DeleteObjectPresignRequest.builder()} or {@code .build()}. + * + * @see #presignDeleteObject(PresignedDeleteObjectRequest) + */ + default PresignedDeleteObjectRequest presignDeleteObject(Consumer request) { + DeleteObjectPresignRequest.Builder builder = DeleteObjectPresignRequest.builder(); + request.accept(builder); + return presignDeleteObject(builder.build()); + } + + /** * Presign a {@link CreateMultipartUploadRequest} so that it can be executed at a later time without requiring additional * signing or authentication. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/DeleteObjectPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/DeleteObjectPresignRequest.java new file mode 100644 index 00000000000..3fce17b22f5 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/DeleteObjectPresignRequest.java @@ -0,0 +1,138 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link DeleteObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + * + * @see S3Presigner#presignDeleteObject(DeleteObjectPresignRequest + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class DeleteObjectPresignRequest extends PresignRequest + implements ToCopyableBuilder { + private final DeleteObjectRequest deleteObjectRequest; + + protected DeleteObjectPresignRequest(DefaultBuilder builder) { + super(builder); + this.deleteObjectRequest = Validate.notNull(builder.deleteObjectRequest, "deleteObjectRequest"); + } + + /** + * Retrieve the {@link DeleteObjectRequest} that should be presigned. + */ + public DeleteObjectRequest deleteObjectRequest() { + return deleteObjectRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * Create a builder that can be used to create a {@link DeleteObjectPresignRequest}. 
+ * + * @see S3Presigner#presignDeleteObject(DeleteObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + DeleteObjectPresignRequest that = (DeleteObjectPresignRequest) o; + + return deleteObjectRequest.equals(that.deleteObjectRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + deleteObjectRequest.hashCode(); + return result; + } + + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignRequest.Builder, + CopyableBuilder { + Builder deleteObjectRequest(DeleteObjectRequest deleteObjectRequest); + + default Builder deleteObjectRequest(Consumer deleteObjectRequest) { + DeleteObjectRequest.Builder builder = DeleteObjectRequest.builder(); + deleteObjectRequest.accept(builder); + return deleteObjectRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + DeleteObjectPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private DeleteObjectRequest deleteObjectRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(DeleteObjectPresignRequest deleteObjectPresignRequest) { + super(deleteObjectPresignRequest); + this.deleteObjectRequest = deleteObjectPresignRequest.deleteObjectRequest; + } + + @Override + public Builder deleteObjectRequest(DeleteObjectRequest deleteObjectRequest) { + this.deleteObjectRequest = deleteObjectRequest; + return this; + } + + @Override + public DeleteObjectPresignRequest build() { + return new DeleteObjectPresignRequest(this); + } + } +} diff --git 
a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedDeleteObjectRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedDeleteObjectRequest.java new file mode 100644 index 00000000000..3ce2d256996 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedDeleteObjectRequest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed a {@link DeleteObjectRequest} that can be executed at a later time without requiring additional signing or + * authentication. + * + * @see S3Presigner#presignDeleteObject(DeleteObjectPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedDeleteObjectRequest extends PresignedRequest + implements ToCopyableBuilder { + + protected PresignedDeleteObjectRequest(DefaultBuilder builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * Create a builder that can be used to create a {@link PresignedDeleteObjectRequest}. + * + * @see S3Presigner#presignDeleteObject(DeleteObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * A builder for a {@link PresignedDeleteObjectRequest}, created with {@link #builder()}. 
+ */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedDeleteObjectRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignedRequest.DefaultBuilder + implements PresignedDeleteObjectRequest.Builder { + + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedDeleteObjectRequest presignedDeleteObjectRequest) { + super(presignedDeleteObjectRequest); + } + + @Override + public PresignedDeleteObjectRequest build() { + return new PresignedDeleteObjectRequest(this); + } + } +} diff --git a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json index cad90f95e8f..4b528a224f5 100644 --- a/services/s3/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/s3/src/main/resources/codegen-resources/endpoint-tests.json @@ -303,7 +303,6 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": true, "UseFIPS": false } @@ -940,10 +939,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -980,10 +977,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-east-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -1022,10 +1017,8 @@ "ForcePathStyle": false, "UseArnRegion": true, "Region": 
"us-east-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3842,8 +3835,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3882,8 +3874,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3923,8 +3914,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -3963,8 +3953,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4003,8 +3992,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4044,8 +4032,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4073,8 +4060,7 @@ "ForcePathStyle": false, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4112,8 +4098,7 @@ "ForcePathStyle": false, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4152,8 +4137,7 @@ "ForcePathStyle": false, "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4219,8 +4203,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4259,8 +4242,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4300,8 +4282,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false 
} }, { @@ -4340,8 +4321,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4380,8 +4360,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4421,8 +4400,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4450,8 +4428,7 @@ "ForcePathStyle": false, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -4490,8 +4467,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4558,8 +4534,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4599,8 +4574,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4626,10 +4600,8 @@ "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4668,8 +4640,7 @@ "ForcePathStyle": true, "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4706,8 +4677,7 @@ "Bucket": "99a_b", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4746,8 +4716,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4803,8 +4772,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": 
false } }, { @@ -4844,8 +4812,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4871,10 +4838,8 @@ "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4913,8 +4878,7 @@ "ForcePathStyle": true, "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4951,8 +4915,7 @@ "Bucket": "99a_b", "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -4991,8 +4954,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5060,8 +5022,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5101,8 +5062,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5128,10 +5088,8 @@ "Bucket": "arn:PARTITION:s3-outposts:REGION:123456789012:outpost:op-01234567890123456:bucket:mybucket", "ForcePathStyle": true, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5170,8 +5128,7 @@ "ForcePathStyle": true, "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5208,8 +5165,7 @@ "Bucket": "99a_b", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5249,8 +5205,7 @@ "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - 
"UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5291,8 +5246,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5321,8 +5275,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -5351,8 +5304,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5381,8 +5333,7 @@ "Endpoint": "http://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5421,10 +5372,8 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5464,8 +5413,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5506,8 +5454,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5550,8 +5497,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5605,10 +5551,8 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": 
false } }, { @@ -5648,8 +5592,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5690,8 +5633,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5720,8 +5662,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -5750,8 +5691,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": true, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5780,8 +5720,7 @@ "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5820,10 +5759,8 @@ "ForcePathStyle": false, "Endpoint": "https://beta.example.com", "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5860,10 +5797,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5901,10 +5836,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -5930,10 +5863,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, 
"UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -5972,10 +5903,8 @@ "Bucket": "arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -6012,10 +5941,8 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6028,7 +5955,6 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, "UseFIPS": true } @@ -6056,10 +5982,8 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6072,7 +5996,6 @@ "Bucket": "arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "cn-north-1", - "RequiresAccountId": true, "UseDualStack": true, "UseFIPS": true } @@ -6111,10 +6034,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6152,10 +6073,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -6181,10 +6100,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": 
false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6223,10 +6140,8 @@ "Bucket": "arn:aws:s3:af-south-1:123456789012:accesspoint:myendpoint", "ForcePathStyle": false, "Region": "af-south-1", - "RequiresAccountId": true, "UseDualStack": true, - "UseFIPS": true, - "___key": "key" + "UseFIPS": true } }, { @@ -6329,10 +6244,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6361,10 +6274,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6403,10 +6314,8 @@ "ForcePathStyle": false, "UseArnRegion": true, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6443,10 +6352,8 @@ "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "ForcePathStyle": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -6473,10 +6380,8 @@ "ForcePathStyle": false, "UseArnRegion": true, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { @@ -7224,10 +7129,8 @@ "ForcePathStyle": false, "UseArnRegion": false, "Region": "us-west-2", - "RequiresAccountId": true, "UseDualStack": false, - "UseFIPS": false, - "___key": "key" + "UseFIPS": false } }, { diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java index 760eb86b959..b413f7b33e0 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java +++ 
b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java @@ -23,8 +23,6 @@ import java.time.Clock; import java.time.Duration; import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; import java.time.ZoneId; import java.time.ZonedDateTime; import org.assertj.core.data.Offset; @@ -32,32 +30,26 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.auth.signer.AwsS3V4Signer; -import software.amazon.awssdk.auth.signer.internal.AbstractAws4Signer; import software.amazon.awssdk.auth.signer.internal.AbstractAwsS3V4Signer; -import software.amazon.awssdk.auth.signer.internal.Aws4SignerRequestParams; import software.amazon.awssdk.auth.signer.internal.SignerConstant; import software.amazon.awssdk.auth.signer.params.Aws4PresignerParams; -import software.amazon.awssdk.auth.signer.params.AwsS3V4SignerParams; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.checksums.ChecksumConstant; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.RequestPayer; import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import 
software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; -import software.amazon.awssdk.utils.DateUtils; @RunWith(MockitoJUnitRunner.class) public class S3PresignerTest { @@ -349,6 +341,116 @@ public void putObject_Sigv4PresignerHonorsSignatureDuration() { }); } + @Test + public void deleteObject_IsNotUrlCompatible() { + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar"))); + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders().keySet()).containsExactlyInAnyOrder("host"); + assertThat(presigned.signedPayload()).isEmpty(); + } + + @Test + public void deleteObject_EndpointOverrideIsIncludedInPresignedUrl() { + S3Presigner presigner = presignerBuilder().endpointOverride(URI.create("http://foo.com")).build(); + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar"))); + + assertThat(presigned.url().toString()).startsWith("http://foo34343434.foo.com/bar?"); + assertThat(presigned.signedHeaders().get("host")).containsExactly("foo34343434.foo.com"); + assertThat(presigned.signedPayload()).isEmpty(); + } + + @Test + public void deleteObject_CredentialsCanBeOverriddenAtTheRequestLevel() { + AwsCredentials clientCredentials = AwsBasicCredentials.create("a", "a"); + AwsCredentials requestCredentials = AwsBasicCredentials.create("b", "b"); + + S3Presigner presigner = presignerBuilder().credentialsProvider(() -> clientCredentials).build(); + + + AwsRequestOverrideConfiguration overrideConfiguration = + AwsRequestOverrideConfiguration.builder() + .credentialsProvider(() -> requestCredentials) + .build(); + + PresignedDeleteObjectRequest 
presignedWithClientCredentials = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar"))); + + PresignedDeleteObjectRequest presignedWithRequestCredentials = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar") + .overrideConfiguration(overrideConfiguration))); + + + assertThat(presignedWithClientCredentials.httpRequest().rawQueryParameters().get("X-Amz-Credential").get(0)) + .startsWith("a"); + assertThat(presignedWithRequestCredentials.httpRequest().rawQueryParameters().get("X-Amz-Credential").get(0)) + .startsWith("b"); + } + + @Test + public void deleteObject_AdditionalHeadersAndQueryStringsCanBeAdded() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .putHeader("X-Amz-AdditionalHeader", "foo1") + .putRawQueryParameter("additionalQueryParam", "foo2") + .build(); + + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar") + .overrideConfiguration(override))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders()).containsOnlyKeys("host", "x-amz-additionalheader"); + assertThat(presigned.signedHeaders().get("x-amz-additionalheader")).containsExactly("foo1"); + assertThat(presigned.httpRequest().headers()).containsKeys("x-amz-additionalheader"); + assertThat(presigned.httpRequest().rawQueryParameters().get("additionalQueryParam").get(0)).isEqualTo("foo2"); + } + + @Test + public void deleteObject_NonSigV4SignersRaisesException() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .signer(new NoOpSigner()) + .build(); + + assertThatThrownBy(() -> presigner.presignDeleteObject(r -> 
r.signatureDuration(Duration.ofMinutes(5)) + .deleteObjectRequest(delo -> delo.bucket("foo34343434") + .key("bar") + .overrideConfiguration(override)))) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("NoOpSigner"); + } + + @Test + public void deleteObject_Sigv4PresignerHonorsSignatureDuration() { + AwsRequestOverrideConfiguration override = + AwsRequestOverrideConfiguration.builder() + .signer(AwsS3V4Signer.create()) + .build(); + + PresignedDeleteObjectRequest presigned = + presigner.presignDeleteObject(r -> r.signatureDuration(Duration.ofSeconds(1234)) + .deleteObjectRequest(delo -> delo.bucket("a") + .key("b") + .overrideConfiguration(override))); + + assertThat(presigned.httpRequest().rawQueryParameters().get("X-Amz-Expires").get(0)).satisfies(expires -> { + assertThat(expires).containsOnlyDigits(); + assertThat(Integer.parseInt(expires)).isCloseTo(1234, Offset.offset(2)); + }); + } + @Test public void getObject_S3ConfigurationCanBeOverriddenToLeverageTransferAcceleration() { S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolverTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolverTest.java new file mode 100644 index 00000000000..99e929c09f4 --- /dev/null +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/MultipartConfigurationResolverTest.java @@ -0,0 +1,83 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.internal.multipart; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; + +public class MultipartConfigurationResolverTest { + + @Test + void resolveThresholdInBytes_valueNotProvided_shouldSameAsPartSize() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .minimumPartSizeInBytes(10L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.thresholdInBytes()).isEqualTo(10L); + } + + @Test + void resolveThresholdInBytes_valueProvided_shouldHonor() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .minimumPartSizeInBytes(1L) + .thresholdInBytes(12L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.thresholdInBytes()).isEqualTo(12L); + } + + @Test + void resolveApiCallBufferSize_valueProvided_shouldHonor() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .apiCallBufferSizeInBytes(100L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.apiCallBufferSize()).isEqualTo(100L); + } + + @Test + void resolveApiCallBufferSize_valueNotProvided_shouldComputeBasedOnPartSize() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .minimumPartSizeInBytes(10L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.apiCallBufferSize()).isEqualTo(40L); + } + + @Test + void 
valueProvidedForAllFields_shouldHonor() { + MultipartConfiguration configuration = MultipartConfiguration.builder() + .minimumPartSizeInBytes(10L) + .thresholdInBytes(8L) + .apiCallBufferSizeInBytes(3L) + .build(); + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(configuration); + assertThat(resolver.minimalPartSizeInBytes()).isEqualTo(10L); + assertThat(resolver.thresholdInBytes()).isEqualTo(8L); + assertThat(resolver.apiCallBufferSize()).isEqualTo(3L); + } + + @Test + void noValueProvided_shouldUseDefault() { + MultipartConfigurationResolver resolver = new MultipartConfigurationResolver(MultipartConfiguration.builder() + .build()); + assertThat(resolver.minimalPartSizeInBytes()).isEqualTo(8L * 1024 * 1024); + assertThat(resolver.thresholdInBytes()).isEqualTo(8L * 1024 * 1024); + assertThat(resolver.apiCallBufferSize()).isEqualTo(8L * 1024 * 1024 * 4); + } +} diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java index 11d54a73fb7..9758b77a9d8 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/internal/multipart/UploadObjectHelperTest.java @@ -38,6 +38,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; @@ -61,6 +62,7 @@ import software.amazon.awssdk.services.s3.model.PutObjectResponse; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import 
software.amazon.awssdk.services.s3.multipart.MultipartConfiguration; import software.amazon.awssdk.testutils.RandomTempFile; import software.amazon.awssdk.utils.CompletableFutureUtils; @@ -97,7 +99,12 @@ public static Stream asyncRequestBody() { @BeforeEach public void beforeEach() { s3AsyncClient = Mockito.mock(S3AsyncClient.class); - uploadHelper = new UploadObjectHelper(s3AsyncClient, PART_SIZE, THRESHOLD, PART_SIZE * 2); + uploadHelper = new UploadObjectHelper(s3AsyncClient, + new MultipartConfigurationResolver(MultipartConfiguration.builder() + .minimumPartSizeInBytes(PART_SIZE) + .thresholdInBytes(THRESHOLD) + .thresholdInBytes(PART_SIZE * 2) + .build())); } @ParameterizedTest @@ -187,7 +194,8 @@ void mpu_onePartFailed_shouldFailOtherPartsAndAbort(AsyncRequestBody asyncReques CompletableFuture future = uploadHelper.uploadObject(putObjectRequest, asyncRequestBody); - assertThatThrownBy(future::join).hasMessageContaining("Failed to send multipart upload requests").hasRootCause(exception); + assertThatThrownBy(() -> future.get(100, TimeUnit.MILLISECONDS)) + .hasMessageContaining("Failed to send multipart upload requests").hasRootCause(exception); verify(s3AsyncClient, never()).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); @@ -197,10 +205,10 @@ void mpu_onePartFailed_shouldFailOtherPartsAndAbort(AsyncRequestBody asyncReques assertThat(actualRequest.uploadId()).isEqualTo(UPLOAD_ID); try { - ongoingRequest.get(1, TimeUnit.MILLISECONDS); + ongoingRequest.get(100, TimeUnit.MILLISECONDS); fail("no exception thrown"); } catch (Exception e) { - assertThat(e.getCause()).hasMessageContaining("Failed to send multipart upload requests").hasRootCause(exception); + assertThat(e.getCause()).isEqualTo(exception); } } @@ -241,9 +249,17 @@ void upload_knownContentLengthCancelResponseFuture_shouldCancelUploadPart() { CompletableFuture future = uploadHelper.uploadObject(putObjectRequest, AsyncRequestBody.fromFile(testFile)); + 
when(s3AsyncClient.abortMultipartUpload(any(AbortMultipartUploadRequest.class))) + .thenReturn(CompletableFuture.completedFuture(AbortMultipartUploadResponse.builder().build())); + future.cancel(true); - assertThat(ongoingRequest).isCancelled(); + try { + ongoingRequest.join(); + fail("no exception"); + } catch (Exception exception) { + assertThat(ongoingRequest).isCancelled(); + } } @ParameterizedTest diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index 17e829b4330..614d119e7d0 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java b/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java deleted file mode 100644 index 40930fae167..00000000000 --- a/services/s3control/src/main/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptor.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.s3control.internal.interceptors; - - -import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; -import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; -import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.CLIENT_ENDPOINT; -import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.ENDPOINT_OVERRIDDEN; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.ENDPOINT_PREFIX; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.S3_OUTPOSTS; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isDualstackEnabled; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsEnabledInClientConfig; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsRegion; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isFipsRegion; -import static software.amazon.awssdk.services.s3control.internal.HandlerUtils.isUseArnRegionEnabledInClientConfig; -import static software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute.S3_ARNABLE_FIELD; - -import java.net.URI; -import java.util.Optional; -import software.amazon.awssdk.annotations.SdkInternalApi; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute; -import software.amazon.awssdk.core.SdkRequest; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.PartitionMetadata; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.internal.resource.S3OutpostResource; 
-import software.amazon.awssdk.services.s3.internal.resource.S3Resource; -import software.amazon.awssdk.services.s3.internal.settingproviders.UseArnRegionProviderChain; -import software.amazon.awssdk.services.s3control.S3ControlConfiguration; -import software.amazon.awssdk.services.s3control.internal.S3ArnableField; -import software.amazon.awssdk.services.s3control.internal.S3ControlArnConverter; -import software.amazon.awssdk.utils.StringUtils; -import software.amazon.awssdk.utils.Validate; - -/** - * Execution interceptor which modifies the HTTP request to S3 Control to - * change the endpoint to the correct endpoint. This includes prefixing the AWS - * account identifier and, when enabled, adding in FIPS and dualstack. - */ -@SdkInternalApi -public final class EndpointAddressInterceptor implements ExecutionInterceptor { - private static final String X_AMZ_OUTPOST_ID_HEADER = "x-amz-outpost-id"; - private static final UseArnRegionProviderChain USE_ARN_REGION_RESOLVER = UseArnRegionProviderChain.create(); - - @Override - public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, - ExecutionAttributes executionAttributes) { - Optional requestArn = getRequestArn(executionAttributes); - - if (requestArn.isPresent()) { - return resolveHostForOutpostArnRequest(context.httpRequest(), executionAttributes, requestArn.get()); - } else if (isNonArnOutpostRequest(context.request())) { - return resolveHostForOutpostNonArnRequest(context.httpRequest(), executionAttributes); - } else { - return resolveHostForNonOutpostNonArnRequest(context.httpRequest(), executionAttributes); - } - } - - private SdkHttpRequest resolveHostForOutpostArnRequest(SdkHttpRequest request, - ExecutionAttributes executionAttributes, - Arn arn) { - S3Resource s3Resource = S3ControlArnConverter.getInstance().convertArn(arn); - - S3ControlConfiguration serviceConfig = getServiceConfig(executionAttributes); - String signingRegion = executionAttributes.getAttribute(SIGNING_REGION).id(); - 
String arnRegion = s3Resource.region().orElseThrow(() -> new IllegalArgumentException("Region is missing from ARN.")); - String arnPartion = arn.partition(); - S3Resource parentS3Resource = s3Resource.parentS3Resource().orElse(null); - - Validate.isTrue(!isFipsInvolved(signingRegion, arnRegion, serviceConfig), - "FIPS is not supported for outpost requests."); - - // Even though we validated that we're not *calling* a FIPS region, the client region may still be a FIPS region if we're - // using the ARN region. For that reason, we need to strip off the "fips" from the signing region before we get the - // partition to make sure we're not making a cross-partition call. - signingRegion = removeFipsIfNeeded(signingRegion); - - String signingPartition = PartitionMetadata.of(Region.of(signingRegion)).id(); - - S3OutpostResource outpostResource = Validate.isInstanceOf(S3OutpostResource.class, parentS3Resource, - "The ARN passed must have a parent outpost resource."); - Validate.isTrue(!isDualstackEnabled(serviceConfig), "Dual stack endpoints are not supported for outpost requests."); - Validate.isTrue(arnPartion.equals(signingPartition), - "The partition field of the ARN being passed as a bucket parameter to an S3 operation does not match " - + "the partition the client has been configured with. Provided partition: '%s'; client partition: '%s'.", - arnPartion, signingPartition); - Validate.isTrue(useArnRegion(serviceConfig) || arnRegion.equals(signingRegion), - "The region field of the ARN being passed as a bucket parameter to an operation does not match the " - + "region the client was configured with. 
Provided region: '%s'; client region: '%s'.", - arnRegion, signingRegion); - - executionAttributes.putAttribute(SIGNING_REGION, Region.of(arnRegion)); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, S3_OUTPOSTS); - - SdkHttpRequest.Builder requestBuilder = request.toBuilder() - .appendHeader(X_AMZ_OUTPOST_ID_HEADER, outpostResource.outpostId()); - - if (isEndpointOverridden(executionAttributes)) { - // Drop endpoint prefix for ARN-based requests - requestBuilder.host(endpointOverride(executionAttributes).getHost()); - } else { - String arnPartitionDnsSuffix = PartitionMetadata.of(arnPartion).dnsSuffix(); - requestBuilder.host(String.format("s3-outposts.%s.%s", arnRegion, arnPartitionDnsSuffix)); - } - - return requestBuilder.build(); - } - - private SdkHttpRequest resolveHostForOutpostNonArnRequest(SdkHttpRequest sdkHttpRequest, - ExecutionAttributes executionAttributes) { - S3ControlConfiguration serviceConfig = getServiceConfig(executionAttributes); - Region signingRegion = executionAttributes.getAttribute(SIGNING_REGION); - - Validate.isTrue(!isDualstackEnabled(serviceConfig), - "Dual stack is not supported for outpost requests."); - Validate.isTrue(!isFipsEnabledInClientConfig(serviceConfig) && !isFipsRegion(signingRegion.id()), - "FIPS endpoints are not supported for outpost requests."); - - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, S3_OUTPOSTS); - - if (isEndpointOverridden(executionAttributes)) { - // Preserve endpoint prefix for endpoint-overridden non-ARN-based requests - return sdkHttpRequest; - } else { - String signingDnsSuffix = PartitionMetadata.of(signingRegion).dnsSuffix(); - return sdkHttpRequest.copy(r -> r.host(String.format("s3-outposts.%s.%s", signingRegion, signingDnsSuffix))); - } - } - - private SdkHttpRequest resolveHostForNonOutpostNonArnRequest(SdkHttpRequest request, - ExecutionAttributes executionAttributes) { - S3ControlConfiguration serviceConfig = getServiceConfig(executionAttributes); - - boolean 
isDualStackEnabled = isDualstackEnabled(serviceConfig); - boolean isFipsEnabledInClient = isFipsEnabledInClientConfig(serviceConfig); - - Validate.isTrue(!isDualStackEnabled || !isFipsEnabledInClient, "Dual stack and FIPS are not supported together."); - - if (isEndpointOverridden(executionAttributes)) { - Validate.isTrue(!isDualStackEnabled, "Dual stack is not supported with endpoint overrides."); - Validate.isTrue(!isFipsEnabledInClient, "FIPS is not supported with endpoint overrides."); - // Preserve endpoint prefix for endpoint-overridden non-ARN-based requests - return request; - } else if (isDualStackEnabled) { - String newEndpointPrefix = ENDPOINT_PREFIX + "." + "dualstack"; - return request.copy(r -> r.host(StringUtils.replace(request.host(), ENDPOINT_PREFIX, newEndpointPrefix))); - } else if (isFipsEnabledInClient) { - String newEndpointPrefix = ENDPOINT_PREFIX + "-" + "fips"; - return request.copy(r -> r.host(StringUtils.replace(request.host(), ENDPOINT_PREFIX, newEndpointPrefix))); - } else { - return request; - } - } - - private Optional getRequestArn(ExecutionAttributes executionAttributes) { - return Optional.ofNullable(executionAttributes.getAttribute(S3_ARNABLE_FIELD)) - .map(S3ArnableField::arn); - } - - private boolean isNonArnOutpostRequest(SdkRequest request) { - return request.getValueForField("OutpostId", String.class) - .map(StringUtils::isNotBlank) - .orElse(false); - } - - private S3ControlConfiguration getServiceConfig(ExecutionAttributes executionAttributes) { - return (S3ControlConfiguration) executionAttributes.getAttribute(AwsSignerExecutionAttribute.SERVICE_CONFIG); - } - - private boolean useArnRegion(S3ControlConfiguration configuration) { - // If useArnRegion is false, it was not set to false by the customer, it was simply not enabled - if (isUseArnRegionEnabledInClientConfig(configuration)) { - return true; - } - - return USE_ARN_REGION_RESOLVER.resolveUseArnRegion().orElse(false); - } - - private boolean 
isEndpointOverridden(ExecutionAttributes executionAttributes) { - return Boolean.TRUE.equals(executionAttributes.getAttribute(ENDPOINT_OVERRIDDEN)); - } - - private URI endpointOverride(ExecutionAttributes executionAttributes) { - return executionAttributes.getAttribute(CLIENT_ENDPOINT); - } - - private boolean isFipsInvolved(String signingRegion, String arnRegion, S3ControlConfiguration serviceConfig) { - if (serviceConfig.fipsModeEnabled()) { - return true; - } - - return isFipsRegion(signingRegion) || isFipsRegion(arnRegion); - } - - private String removeFipsIfNeeded(String region) { - if (region.startsWith("fips-")) { - return StringUtils.replace(region, "fips-", ""); - } - - if (region.endsWith("-fips")) { - return StringUtils.replace(region, "-fips", ""); - } - return region; - } -} diff --git a/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json index ac2587dad9a..8e4ec39fae7 100644 --- a/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -61,148 +61,265 @@ }, "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "stringEquals", "argv": [ { "ref": "Region" + }, + "snow" + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "S3 Snow 
does not support DualStack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "S3 Snow does not support FIPS", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": "{url#scheme}://{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "OutpostId" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" }, - "snow" + true ] }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + } + ], + "error": "Partition does not support FIPS", + "type": "error" + }, + { + "conditions": [ { "fn": "isSet", "argv": [ { - "ref": "Endpoint" + "ref": "RequiresAccountId" } ] }, { - "fn": "parseURL", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] } - ], - "assign": "url" + ] } ], - "type": "tree", - "rules": [ + "error": "AccountId is required but not set", + "type": "error" + }, + { + "conditions": [ { - "conditions": [ + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ { - "fn": "aws.partition", + "fn": "isValidHostLabel", "argv": [ { - "ref": "Region" - } - ], - "assign": "partitionResult" + "ref": "AccountId" + }, + false + ] } - ], - "type": "tree", - "rules": [ + ] + } + ], + "error": 
"AccountId must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" + }, + { + "conditions": [ + { + "fn": "not", + "argv": [ { - "conditions": [], - "type": "tree", - "rules": [ + "fn": "isValidHostLabel", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "S3 Snow does not support Dual-stack", - "type": "error" + "ref": "OutpostId" }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "S3 Snow does not support FIPS", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": "{url#scheme}://{url#authority}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - } + false ] } ] - }, - { - "conditions": [], - "error": "A valid partition could not be determined", - "type": "error" } - ] + ], + "error": "OutpostId must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" }, { "conditions": [ { - "fn": "isSet", + "fn": "isValidHostLabel", "argv": [ { - "ref": "OutpostId" - } + "ref": "Region" + }, + true ] } ], @@ -211,318 +328,716 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid configuration: Outposts do not support dual-stack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } ], - "assign": "partitionResult" + "assign": "url" } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": 
"s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ + "fn": "booleanEquals", + "argv": [ { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "error": "Partition does not support FIPS", - "type": "error" + "ref": "UseFIPS" }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isSet", - "argv": [ + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3-outposts.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "Invalid region: region was not a valid DNS name.", + "type": "error" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccessPointName" + } + ] + }, + { + "fn": "aws.parseArn", + "argv": [ + { + "ref": "AccessPointName" + } + ], + "assign": "accessPointArn" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + 
"resourceId[0]" + ], + "assign": "arnType" + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "arnType" + }, + "" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "service" + ] + }, + "s3-outposts" + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid configuration: Outpost Access Points do not support dual-stack", + "type": "error" + }, + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[1]" + ], + "assign": "outpostId" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "outpostId" + }, + false + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "UseArnRegion" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseArnRegion" + }, + false + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ { - "ref": "AccountId" - } + "ref": "accessPointArn" + }, + "region" ] - } + }, + "{Region}" ] } + ] + } + ], + "error": "Invalid configuration: region from ARN `{accessPointArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", + "type": "error" + }, + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } ], - "error": "AccountId is required but not set", - "type": "error" - }, + "assign": "partitionResult" + } + ], + "type": "tree", + "rules": [ { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "aws.partition", + "argv": [ { - "fn": "isSet", + "fn": "getAttr", "argv": [ { - "ref": "AccountId" - } + "ref": 
"accessPointArn" + }, + "region" ] - }, + } + ], + "assign": "arnPartition" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "not", + "fn": "stringEquals", "argv": [ { - "fn": "isValidHostLabel", + "fn": "getAttr", + "argv": [ + { + "ref": "arnPartition" + }, + "name" + ] + }, + { + "fn": "getAttr", "argv": [ { - "ref": "AccountId" + "ref": "partitionResult" }, - false + "name" ] } ] } ], - "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { "conditions": [ { - "fn": "not", + "fn": "isValidHostLabel", "argv": [ { - "fn": "isValidHostLabel", + "fn": "getAttr", "argv": [ { - "ref": "OutpostId" + "ref": "accessPointArn" }, - false + "region" ] - } + }, + true ] } ], - "error": "OutpostId must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - }, - { - "conditions": [], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isValidHostLabel", + "fn": "not", "argv": [ { - "ref": "Region" - }, - true + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "accountId" + ] + }, + "" + ] + } ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "accountId" + ] + }, + false + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "isSet", "argv": [ { - "ref": "UseDualStack" - }, - true + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "AccountId" + }, + "{accessPointArn#accountId}" + ] + } ] } ], - "error": "Invalid configuration: Outposts do not support dual-stack", + "error": "Invalid ARN: the accountId specified in the ARN (`{accessPointArn#accountId}`) does not match the parameter (`{AccountId}`)", "type": "error" }, { - "conditions": [], + "conditions": [ + { + "fn": 
"getAttr", + "argv": [ + { + "ref": "accessPointArn" + }, + "resourceId[2]" + ], + "assign": "outpostType" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", + "fn": "getAttr", "argv": [ { - "ref": "Endpoint" - } + "ref": "accessPointArn" + }, + "resourceId[3]" ], - "assign": "url" + "assign": "accessPointName" } ], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ + "type": "tree", + "rules": [ + { + "conditions": [ { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "fn": "stringEquals", + "argv": [ + { + "ref": "outpostType" + }, + "accesspoint" + ] + } + ], + "type": "tree", + "rules": [ { - "ref": "UseFIPS" + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts-fips.{accessPointArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + 
"{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-outposts-fips.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" + "conditions": [], + "endpoint": { + "url": "https://s3-outposts.{accessPointArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" } ] }, - "headers": {} - }, - "type": "endpoint" + { + "conditions": [], + "error": "Expected an outpost type `accesspoint`, found `{outpostType}`", + "type": "error" + } + ] }, { "conditions": [], - "endpoint": { - "url": "https://s3-outposts.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "error": "Invalid ARN: expected an access point name", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: Expected a 4-component resource", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `{accessPointArn#accountId}`", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid region: region was not a valid DNS name.", + "error": "Invalid ARN: missing account ID", "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid region in ARN: `{accessPointArn#region}` (invalid DNS name)", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", + "type": "error" } ] } ] } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", + "type": "error" } ] }, { "conditions": [], - "error": "A valid partition could not be determined", + "error": "Invalid ARN: The Outpost Id was not set", "type": "error" } ] + } + ] + }, + { + "conditions": [], + "error": "Invalid ARN: No ARN type specified", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Bucket" + } + ] + }, + { + "fn": "aws.parseArn", + "argv": [ + { + "ref": "Bucket" + } + ], + "assign": "bucketArn" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "resourceId[0]" + ], + "assign": "arnType" }, { - "conditions": [ + "fn": "not", + "argv": [ { - "fn": "isSet", + "fn": "stringEquals", "argv": [ { - "ref": "AccessPointName" - } + "ref": "arnType" + }, + "" ] - }, + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "aws.parseArn", + "fn": "stringEquals", "argv": [ { - "ref": "AccessPointName" - } - ], - "assign": "accessPointArn" + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "service" + ] + }, + "s3-outposts" + ] } ], "type": "tree", @@ -530,28 +1045,29 @@ { "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "UseDualStack" }, - "resourceId[0]" - ], - "assign": "arnType" - }, + true + 
] + } + ], + "error": "Invalid configuration: Outpost buckets do not support dual-stack", + "type": "error" + }, + { + "conditions": [ { - "fn": "not", + "fn": "getAttr", "argv": [ { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "" - ] - } - ] + "ref": "bucketArn" + }, + "resourceId[1]" + ], + "assign": "outpostId" } ], "type": "tree", @@ -559,18 +1075,12 @@ { "conditions": [ { - "fn": "stringEquals", + "fn": "isValidHostLabel", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "service" - ] + "ref": "outpostId" }, - "s3-outposts" + false ] } ], @@ -578,40 +1088,107 @@ "rules": [ { "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "UseArnRegion" + } + ] + }, { "fn": "booleanEquals", "argv": [ { - "ref": "UseDualStack" + "ref": "UseArnRegion" }, - true + false + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "region" + ] + }, + "{Region}" + ] + } ] } ], - "error": "Invalid configuration: Outpost Access Points do not support dual-stack", + "error": "Invalid configuration: region from ARN `{bucketArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", "type": "error" }, { - "conditions": [], + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "region" + ] + } + ], + "assign": "arnPartition" + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "getAttr", + "fn": "aws.partition", "argv": [ { - "ref": "accessPointArn" - }, - "resourceId[1]" + "ref": "Region" + } ], - "assign": "outpostId" + "assign": "partitionResult" } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "arnPartition" + }, + "name" + ] + }, + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + } + ] + 
} + ], "type": "tree", "rules": [ { @@ -620,522 +1197,439 @@ "fn": "isValidHostLabel", "argv": [ { - "ref": "outpostId" + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "region" + ] }, - false + true ] } ], "type": "tree", "rules": [ { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "not", + "argv": [ { - "fn": "isSet", + "fn": "stringEquals", "argv": [ { - "ref": "UseArnRegion" - } + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "accountId" + ] + }, + "" ] - }, + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ { - "fn": "booleanEquals", + "fn": "isValidHostLabel", "argv": [ { - "ref": "UseArnRegion" + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "accountId" + ] }, false ] - }, + } + ], + "type": "tree", + "rules": [ { - "fn": "not", - "argv": [ + "conditions": [ { - "fn": "stringEquals", + "fn": "isSet", "argv": [ { - "fn": "getAttr", + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "AccountId" }, - "region" + "{bucketArn#accountId}" ] - }, - "{Region}" + } ] } - ] - } - ], - "error": "Invalid configuration: region from ARN `{accessPointArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ + ], + "error": "Invalid ARN: the accountId specified in the ARN (`{bucketArn#accountId}`) does not match the parameter (`{AccountId}`)", + "type": "error" + }, { "conditions": [ { - "fn": "aws.partition", + "fn": "getAttr", "argv": [ { - "ref": "Region" - } + "ref": "bucketArn" + }, + "resourceId[2]" ], - "assign": "partitionResult" + "assign": "outpostType" } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "bucketArn" + }, + "resourceId[3]" + ], + "assign": "bucketName" + } + ], "type": "tree", 
"rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "stringEquals", "argv": [ { - "fn": "getAttr", + "ref": "outpostType" + }, + "bucket" + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "accessPointArn" + "ref": "UseFIPS" }, - "region" + true ] } ], - "assign": "arnPartition" - } - ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://s3-outposts-fips.{bucketArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, { - "conditions": [ + "fn": "parseURL", + "argv": [ { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arnPartition" - }, - "name" - ] - }, - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - } - ] + "ref": "Endpoint" } ], - "type": "tree", - "rules": [ + "assign": "url" + } + ], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "region" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "accountId" - ] - }, - "" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": 
[], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "accountId" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "AccountId" - }, - "{accessPointArn#accountId}" - ] - } - ] - } - ], - "error": "Invalid ARN: the accountId specified in the ARN (`{accessPointArn#accountId}`) does not match the parameter (`{AccountId}`)", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "resourceId[2]" - ], - "assign": "outpostType" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "accessPointArn" - }, - "resourceId[3]" - ], - "assign": "accessPointName" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "outpostType" - }, - "accesspoint" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-outposts-fips.{accessPointArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{accessPointArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{accessPointArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - 
{ - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{accessPointArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{accessPointArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://s3-outposts.{accessPointArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{accessPointArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{accessPointArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Expected an outpost type `accesspoint`, found `{outpostType}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: expected an access point name", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Expected a 4-component resource", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `{accessPointArn#accountId}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: missing account ID", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid region in ARN: `{accessPointArn#region}` (invalid DNS name)", - "type": "error" - } - ] + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" } ] }, - { - "conditions": [], - "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", - "type": "error" + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] } - ] + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://s3-outposts.{bucketArn#region}.{arnPartition#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" } ] }, { "conditions": [], - "error": "Could not load partition for ARN region `{accessPointArn#region}`", + "error": "Invalid ARN: Expected an outpost type `bucket`, found `{outpostType}`", "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: expected a bucket name", + "type": "error" } ] }, { "conditions": [], - "error": "A valid partition could not be determined", + "error": "Invalid ARN: Expected a 4-component resource", "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `{bucketArn#accountId}`", + "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid ARN: missing account ID", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", + "error": "Invalid region in ARN: `{bucketArn#region}` (invalid DNS name)", "type": "error" } ] + }, + { + "conditions": [], + "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", + "type": "error" } ] - }, - { - "conditions": [], - "error": "Invalid ARN: The Outpost Id was not set", - "type": "error" } ] } ] + }, + { + "conditions": [], + "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", + "type": "error" } ] }, { "conditions": [], - "error": "Invalid ARN: No ARN type specified", + "error": "Invalid ARN: The Outpost Id was not set", "type": "error" } ] + } + ] + }, + { + "conditions": [], + "error": "Invalid ARN: No ARN type specified", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "partitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "Region" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "partitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + } + ], + "error": "Partition does not support FIPS", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } 
+ ] + } + ], + "error": "AccountId is required but not set", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "AccountId" + }, + false + ] + } + ] + } + ], + "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", + "type": "error" }, { "conditions": [ @@ -1143,18 +1637,18 @@ "fn": "isSet", "argv": [ { - "ref": "Bucket" + "ref": "Endpoint" } ] }, { - "fn": "aws.parseArn", + "fn": "parseURL", "argv": [ { - "ref": "Bucket" + "ref": "Endpoint" } ], - "assign": "bucketArn" + "assign": "url" } ], "type": "tree", @@ -1162,1304 +1656,493 @@ { "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "bucketArn" + "ref": "UseDualStack" }, - "resourceId[0]" - ], - "assign": "arnType" + true + ] + } + ], + "error": "Invalid Configuration: DualStack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] }, { - "fn": "not", + "fn": "booleanEquals", "argv": [ { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "" - ] + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [ + "endpoint": { + "url": "{url#scheme}://{AccountId}.{url#authority}{url#path}", + "properties": { + "authSchemes": [ { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "service" - ] - }, - "s3-outposts" - ] + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid configuration: Outpost buckets do not support dual-stack", - 
"type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "resourceId[1]" - ], - "assign": "outpostId" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "ref": "outpostId" - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "UseArnRegion" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseArnRegion" - }, - false - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "region" - ] - }, - "{Region}" - ] - } - ] - } - ], - "error": "Invalid configuration: region from ARN `{bucketArn#region}` does not match client region `{Region}` and UseArnRegion is `false`", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "region" - ] - } - ], - "assign": "arnPartition" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "partitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arnPartition" - }, - "name" - ] - }, - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ 
- { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "region" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "accountId" - ] - }, - "" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "accountId" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "AccountId" - }, - "{bucketArn#accountId}" - ] - } - ] - } - ], - "error": "Invalid ARN: the accountId specified in the ARN (`{bucketArn#accountId}`) does not match the parameter (`{AccountId}`)", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "resourceId[2]" - ], - "assign": "outpostType" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "bucketArn" - }, - "resourceId[3]" - ], - "assign": "bucketName" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "outpostType" - }, - "bucket" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "endpoint": { - "url": 
"https://s3-outposts-fips.{bucketArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{bucketArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{bucketArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{bucketArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{bucketArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "https://s3-outposts.{bucketArn#region}.{arnPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{bucketArn#region}" - } - ] - }, - "headers": { - "x-amz-account-id": [ - "{bucketArn#accountId}" - ], - "x-amz-outpost-id": [ - "{outpostId}" - ] - } - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Expected an outpost type `bucket`, found `{outpostType}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: expected a bucket name", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Expected a 4-component resource", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `{bucketArn#accountId}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: missing account ID", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid region in ARN: `{bucketArn#region}` (invalid DNS name)", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Client was configured for partition `{partitionResult#name}` but ARN has `{arnPartition#name}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "A valid partition could not be determined", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Could not load partition for ARN region `{bucketArn#region}`", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`., found: `{outpostId}`", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The Outpost Id was not set", - "type": "error" - } - ] - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: No ARN type specified", - "type": "error" - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "partitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "ref": "Region" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "partitionResult" - }, - "name" - ] - }, - "aws-cn" - ] - } - ], - "error": "Partition does not support FIPS", - "type": "error" - }, - { - "conditions": [], - "type": "tree", 
- "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ] - } - ], - "error": "AccountId is required but not set", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "ref": "AccountId" - }, - false - ] - } - ] - } - ], - "error": "AccountId must only contain a-z, A-Z, 0-9 and `-`.", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - }, - { - "fn": "parseURL", - "argv": [ - { - "ref": "Endpoint" - } - ], - "assign": "url" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "{url#scheme}://{AccountId}.{url#authority}{url#path}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], - "endpoint": { - "url": "{url#scheme}://{url#authority}{url#path}", - "properties": { - 
"authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": 
"https://{AccountId}.s3-control-fips.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - } - ], - "endpoint": { - "url": "https://s3-control-fips.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "endpoint": { - "url": "https://s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - 
"headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "RequiresAccountId" - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "RequiresAccountId" - }, - true - ] - }, - { - "fn": "isSet", - "argv": [ - { - "ref": "AccountId" - } - ] - } - ], - "endpoint": { - "url": "https://{AccountId}.s3-control.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - false - ] - } - ], - "endpoint": { - "url": "https://s3-control.{Region}.{partitionResult#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - } - ] - } - ] - } - ] - } - ] - }, + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "{url#scheme}://{url#authority}{url#path}", + "properties": { + "authSchemes": [ { - "conditions": [], - "error": "Invalid region: region was not a valid DNS name.", - "type": "error" + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" } ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + }, + { + 
"fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" } ] }, { - "conditions": [], - "error": "A valid partition could not be determined", - "type": "error" + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] } - ] + ], + "endpoint": { + "url": "https://{AccountId}.s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-control-fips.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.s3-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, 
+ true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://s3-control-fips.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-control.dualstack.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "RequiresAccountId" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": 
"RequiresAccountId" + }, + true + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.s3-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://s3-control.{Region}.{partitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid region: region was not a valid DNS name.", + "type": "error" } ] - }, - { - "conditions": [], - "error": "Region must be set", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Region must be set", + "type": "error" } ] } \ No newline at end of file diff --git a/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json b/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json index afb8c275442..6d2656e10b9 100644 --- a/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json @@ -771,7 +771,6 @@ ], "params": { "Bucket": "blah", - "Operation": "CreateBucket", "OutpostId": "123", "Region": "us-east-2", "RequiresAccountId": false, @@ -811,7 +810,6 @@ ], "params": { "Bucket": "blah", - "Operation": "CreateBucket", "OutpostId": "123", "Region": "us-east-2", "RequiresAccountId": false, @@ -849,7 +847,6 @@ ], "params": { "Bucket": "blah", - "Operation": "CreateBucket", "Region": 
"us-east-2", "RequiresAccountId": false, "UseDualStack": false, @@ -880,14 +877,13 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", - "Operation": "ListRegionalBuckets", + "AccountId": "123456789012", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -909,7 +905,7 @@ } ] }, - "url": "https://123.s3-control.us-east-2.amazonaws.com" + "url": "https://123456789012.s3-control.us-east-2.amazonaws.com" } }, "operationInputs": [ @@ -919,13 +915,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "123", - "Operation": "ListRegionalBuckets", + "AccountId": "123456789012", "Region": "us-east-2", "RequiresAccountId": true, "UseDualStack": false, @@ -957,14 +952,13 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", - "Operation": "CreateBucket", + "AccountId": "123456789012", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -1131,7 +1125,7 @@ { "documentation": "Account ID set inline and in ARN and they do not match@us-west-2", "expect": { - "error": "Invalid ARN: the accountId specified in the ARN (`123456789012`) does not match the parameter (`9999999`)" + "error": "Invalid ARN: the accountId specified in the ARN (`123456789012`) does not match the parameter (`999999999999`)" }, "operationInputs": [ { @@ -1141,14 +1135,14 @@ }, "operationName": "GetAccessPoint", "operationParams": { - "AccountId": "9999999", + "AccountId": "999999999999", "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint" } } ], "params": { "AccessPointName": 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "AccountId": "9999999", + "AccountId": "999999999999", "Region": "us-west-2", "RequiresAccountId": true, "UseArnRegion": false, @@ -1190,7 +1184,6 @@ "AccessPointName": "apname", "AccountId": "123456789012", "Endpoint": "https://control.vpce-1a2b3c4d-5e6f.s3.us-west-2.vpce.amazonaws.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1239,7 +1232,6 @@ "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", "Endpoint": "https://beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1254,7 +1246,6 @@ "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1269,7 +1260,6 @@ "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Endpoint": "beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1296,7 +1286,6 @@ "params": { "Bucket": "bucketname", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-west-2", "RequiresAccountId": false, @@ -1337,14 +1326,14 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "https://beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -1376,15 +1365,14 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", + "AccountId": "123456789012", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -1418,15 +1406,14 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "123", + "AccountId": "123456789012", "OutpostId": "op-123" } } ], "params": { - "AccountId": "123", + "AccountId": "123456789012", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -1468,7 +1455,6 @@ "params": { "Bucket": "blah", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "123", "Region": "us-east-2", "RequiresAccountId": false, @@ -1484,7 +1470,6 @@ "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Endpoint": "https://beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -1499,7 +1484,6 @@ "params": { "Bucket": "bucketname", "Endpoint": "https://beta.example.com", - "Operation": "CreateBucket", "OutpostId": "op-123", "Region": "us-west-2", "RequiresAccountId": false, @@ -1540,7 +1524,8 @@ "operationName": "CreateAccessPoint", "operationParams": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname" + "Name": "apname", + "AccountId": "123456789012" } } ], @@ -1584,7 +1569,8 @@ }, "operationName": "GetBucket", 
"operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1628,7 +1614,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1673,7 +1660,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1718,7 +1706,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1776,7 +1765,8 @@ "operationName": "CreateAccessPoint", "operationParams": { "Bucket": "arn:aws-cn:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname" + "Name": "apname", + "AccountId": "123456789012" } } ], @@ -1820,7 +1810,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1864,7 +1855,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": 
"arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1909,7 +1901,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -1954,7 +1947,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2012,7 +2006,8 @@ "operationName": "CreateAccessPoint", "operationParams": { "Bucket": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Name": "apname" + "Name": "apname", + "AccountId": "123456789012" } } ], @@ -2056,7 +2051,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2100,7 +2096,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2145,7 +2142,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": 
"arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws-us-gov:s3-outposts:us-gov-west-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2190,7 +2188,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -2356,11 +2355,11 @@ } ] }, - "url": "https://1234567890-aBC.s3-control-fips.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.us-east-1.amazonaws.com" } }, "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, @@ -2501,7 +2500,7 @@ } ] }, - "url": "https://1234567890-aBC.s3-control.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -2511,12 +2510,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, @@ -2561,7 +2560,7 @@ } ] }, - "url": "https://1234567890-aBC.s3-control-fips.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -2572,12 +2571,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": false, @@ -2598,7 +2597,7 @@ } ] }, - "url": 
"https://1234567890-aBC.s3-control-fips.dualstack.us-east-1.amazonaws.com" + "url": "https://123456789012.s3-control-fips.dualstack.us-east-1.amazonaws.com" } }, "operationInputs": [ @@ -2610,12 +2609,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "UseDualStack": true, @@ -2636,7 +2635,7 @@ } ] }, - "url": "https://1234567890-aBC.example.com" + "url": "https://123456789012.example.com" } }, "operationInputs": [ @@ -2647,12 +2646,12 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "Endpoint": "https://example.com" @@ -2704,7 +2703,7 @@ } }, { - "documentation": "account id with custom endpoint, fips and dualstack", + "documentation": "account id with custom endpoint, fips", "expect": { "endpoint": { "properties": { @@ -2717,7 +2716,7 @@ } ] }, - "url": "https://1234567890-aBC.example.com" + "url": "https://123456789012.example.com" } }, "operationInputs": [ @@ -2729,21 +2728,20 @@ }, "operationName": "ListRegionalBuckets", "operationParams": { - "AccountId": "1234567890-aBC" + "AccountId": "123456789012" } } ], "params": { - "AccountId": "1234567890-aBC", + "AccountId": "123456789012", "Region": "us-east-1", "RequiresAccountId": true, "Endpoint": "https://example.com", - "UseFIPS": true, - "UseDualstack": true + "UseFIPS": true } }, { - "documentation": "custom endpoint, fips and dualstack", + "documentation": "custom endpoint, fips", "expect": { "endpoint": { "properties": { @@ -2762,8 +2760,7 @@ "params": { "Region": "us-east-1", "Endpoint": "https://example.com", - "UseFIPS": true, - "UseDualstack": true + "UseFIPS": true } }, { @@ -2786,32 
+2783,19 @@ "params": { "Region": "us-east-1", "Endpoint": "https://example.com", - "UseFIPS": true, - "UseDualstack": false + "UseFIPS": true } }, { - "documentation": "custom endpoint, dualstack", + "documentation": "custom endpoint, DualStack", "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "s3", - "signingRegion": "us-east-1", - "disableDoubleEncoding": true - } - ] - }, - "url": "https://example.com" - } + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" }, "params": { "Region": "us-east-1", "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualstack": true + "UseDualStack": true } }, { @@ -2835,7 +2819,6 @@ "error": "AccountId is required but not set" }, "params": { - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -2862,7 +2845,6 @@ ], "params": { "AccountId": "/?invalid¬-host*label", - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "us-east-2", "RequiresAccountId": true, @@ -2943,7 +2925,6 @@ "AccessPointName": "apname", "Endpoint": "https://beta.example.com", "AccountId": "123456789012", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -2978,7 +2959,6 @@ "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "Endpoint": "https://beta.example.com", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": false, @@ -2986,9 +2966,9 @@ } }, { - "documentation": "Dualstack + Custom endpoint is not supported(non-arn)", + "documentation": "DualStack + Custom endpoint is not supported(non-arn)", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" }, "operationInputs": [ { @@ 
-3008,7 +2988,6 @@ "AccessPointName": "apname", "Endpoint": "https://beta.example.com", "AccountId": "123456789012", - "Operation": "GetAccessPoint", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -3029,14 +3008,14 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "https://beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -3063,7 +3042,6 @@ ], "params": { "AccountId": "0123456789012", - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "cn-north-1", "RequiresAccountId": true, @@ -3090,7 +3068,6 @@ ], "params": { "AccountId": "0123456789012", - "Operation": "ListRegionalBuckets", "OutpostId": "?outpost/invalid+", "Region": "us-west-1", "RequiresAccountId": true, @@ -3118,7 +3095,6 @@ "error": "Invalid region: region was not a valid DNS name." 
}, "params": { - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "invalid-region 42", "AccountId": "0123456", @@ -3145,7 +3121,6 @@ } }, "params": { - "Operation": "ListRegionalBuckets", "OutpostId": "op-123", "Region": "us-west-2", "UseDualStack": false, @@ -3205,14 +3180,14 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Endpoint": "https://beta.example.com", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseArnRegion": false, @@ -3308,7 +3283,8 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], @@ -3333,13 +3309,13 @@ }, "operationName": "GetBucket", "operationParams": { - "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket" + "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" } } ], "params": { "Bucket": "arn:aws:s3-outposts:cn-north-1:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Operation": "GetBucket", "Region": "us-west-2", "RequiresAccountId": true, "UseArnRegion": true, @@ -3568,22 +3544,20 @@ "Bucket": "bucketName", "Endpoint": "https://10.0.1.12:433", "UseFIPS": true, - "UseDualStack": false, - "Accelerate": false + "UseDualStack": false } }, { - "documentation": "S3 Snow Control with Dual-stack enabled", + "documentation": "S3 Snow Control with Dualstack 
enabled", "expect": { - "error": "S3 Snow does not support Dual-stack" + "error": "S3 Snow does not support DualStack" }, "params": { "Region": "snow", "Bucket": "bucketName", "Endpoint": "https://10.0.1.12:433", "UseFIPS": false, - "UseDualStack": true, - "Accelerate": false + "UseDualStack": true } } ], diff --git a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java b/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java deleted file mode 100644 index ce035d11b19..00000000000 --- a/services/s3control/src/test/java/software/amazon/awssdk/services/s3control/internal/interceptors/EndpointAddressInterceptorTest.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.awssdk.services.s3control.internal.interceptors; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SERVICE_SIGNING_NAME; -import static software.amazon.awssdk.auth.signer.AwsSignerExecutionAttribute.SIGNING_REGION; -import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.SERVICE_CONFIG; -import static software.amazon.awssdk.services.s3control.internal.S3ControlInternalExecutionAttribute.S3_ARNABLE_FIELD; - -import java.util.Optional; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.core.Protocol; -import software.amazon.awssdk.core.SdkRequest; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.SdkHttpFullRequest; -import software.amazon.awssdk.http.SdkHttpMethod; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3control.S3ControlClient; -import software.amazon.awssdk.services.s3control.S3ControlConfiguration; -import software.amazon.awssdk.services.s3control.internal.S3ArnableField; -import software.amazon.awssdk.services.s3control.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3control.model.ListRegionalBucketsRequest; - -public class EndpointAddressInterceptorTest { - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private static final String X_AMZ_ACCOUNT_ID = "x-amz-account-id"; - private static final String ACCOUNT_ID = "123456789012"; - - private 
SdkHttpRequest request; - private S3ControlConfiguration configuration; - private ExecutionAttributes executionAttributes; - - @Before - public void setup() { - request = SdkHttpFullRequest.builder() - .appendHeader(X_AMZ_ACCOUNT_ID, ACCOUNT_ID) - .protocol(Protocol.HTTPS.toString()) - .method(SdkHttpMethod.POST) - .host(S3ControlClient.serviceMetadata().endpointFor(Region.US_EAST_1).toString()) - .build(); - configuration = S3ControlConfiguration.builder().build(); - executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3-control"); - executionAttributes.putAttribute(SIGNING_REGION, Region.of("us-east-1")); - executionAttributes.putAttribute(SERVICE_CONFIG, configuration); - } - - @Test - public void modifyHttpRequest_ResolvesCorrectHost_StandardSettings() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request), new ExecutionAttributes()); - assertThat(modified.host()).isEqualTo("s3-control.us-east-1.amazonaws.com"); - } - - @Test - public void modifyHttpRequest_ResolvesCorrectHost_Dualstack() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().dualstackEnabled(true).build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request), executionAttributes); - assertThat(modified.host()).isEqualTo("s3-control.dualstack.us-east-1.amazonaws.com"); - } - - @Test - public void modifyHttpRequest_ResolvesCorrectHost_Fips() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().fipsModeEnabled(true).build(); - 
ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request), executionAttributes); - assertThat(modified.host()).isEqualTo("s3-control-fips.us-east-1.amazonaws.com"); - } - - @Test - public void createBucketRequestWithOutpostId_shouldRedirect() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - CreateBucketRequest createBucketRequest = CreateBucketRequest.builder().outpostId("1234").build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(createBucketRequest), - executionAttributes); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); - assertThat(modified.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); - } - - @Test - public void listRegionalBucketsRequestsWithOutpostId_shouldRedirect() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder().outpostId("1234").build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new 
Context(request).request(sdkRequest), - executionAttributes); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); - assertThat(modified.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); - } - - @Test - public void listRegionalBucketsRequestsWithoutOutpostId_shouldNotRedirect() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder().build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() - .dualstackEnabled(true) - .build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), - executionAttributes); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3"); - assertThat(modified.host()).isEqualTo("s3-control.dualstack.us-east-1.amazonaws.com"); - } - - @Test - public void createBucketRequestsWithoutOutpostId_shouldNotRedirect() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder() - .build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() - .fipsModeEnabled(true) - .build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - SdkHttpRequest modified = interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), - 
executionAttributes); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3"); - assertThat(modified.host()).isEqualTo("s3-control-fips.us-east-1.amazonaws.com"); - } - - @Test - public void listRegionalBucketsRequestWithOutpostId_fipsEnabled_shouldThrowException() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder() - .outpostId("123") - .build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().fipsModeEnabled(true).build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - assertThatThrownBy(() -> interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), - executionAttributes)).hasMessageContaining("FIPS endpoints are " - + "not supported"); - } - - @Test - public void listRegionalBucketsRequestWithOutpostId_fipsDualsackEnabled_shouldThrowException() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - ListRegionalBucketsRequest sdkRequest = ListRegionalBucketsRequest.builder() - .outpostId("123") - .build(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder().dualstackEnabled(true).build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - executionAttributes.putAttribute(SIGNING_REGION, Region.US_EAST_1); - executionAttributes.putAttribute(SERVICE_SIGNING_NAME, "s3"); - - assertThatThrownBy(() -> interceptor.modifyHttpRequest(new Context(request).request(sdkRequest), - executionAttributes)) - .hasMessageContaining("Dual stack"); - } - - @Test(expected = 
IllegalArgumentException.class) - public void modifyHttpRequest_ThrowsException_FipsAndDualstack() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - S3ControlConfiguration controlConfiguration = S3ControlConfiguration.builder() - .fipsModeEnabled(true) - .dualstackEnabled(true) - .build(); - ExecutionAttributes executionAttributes = new ExecutionAttributes(); - executionAttributes.putAttribute(SdkExecutionAttribute.SERVICE_CONFIG, controlConfiguration); - - interceptor.modifyHttpRequest(new Context(request), executionAttributes); - } - - @Test - public void outpostBucketArn_shouldResolveHost() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - Arn arn = Arn.fromString("arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"); - executionAttributes.putAttribute(S3_ARNABLE_FIELD, S3ArnableField.builder().arn(arn).build()); - SdkHttpRequest modifiedRequest = interceptor.modifyHttpRequest(new Context(request), executionAttributes); - - assertThat(modifiedRequest.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); - assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); - assertThat(modifiedRequest.headers().get("x-amz-outpost-id").get(0)).isEqualTo("op-01234567890123456"); - assertThat(modifiedRequest.headers().get("x-amz-account-id").get(0)).isEqualTo(ACCOUNT_ID); - } - - @Test - public void outpostAccessPointArn_shouldResolveHost() { - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - - Arn arn = Arn.fromString("arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint"); - executionAttributes.putAttribute(S3_ARNABLE_FIELD, S3ArnableField.builder().arn(arn).build()); - SdkHttpRequest modifiedRequest = interceptor.modifyHttpRequest(new Context(request), executionAttributes); - - assertThat(modifiedRequest.host()).isEqualTo("s3-outposts.us-east-1.amazonaws.com"); 
- assertThat(executionAttributes.getAttribute(SERVICE_SIGNING_NAME)).isEqualTo("s3-outposts"); - assertThat(modifiedRequest.headers().get("x-amz-outpost-id").get(0)).isEqualTo("op-01234567890123456"); - assertThat(modifiedRequest.headers().get("x-amz-account-id").get(0)).isEqualTo(ACCOUNT_ID); - } - - @Test - public void outpostArnWithFipsEnabled_shouldThrowException() { - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("FIPS"); - - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - Arn arn = Arn.fromString("arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"); - executionAttributes.putAttribute(S3_ARNABLE_FIELD, S3ArnableField.builder().arn(arn).build()); - executionAttributes.putAttribute(SERVICE_CONFIG, enableFips()); - interceptor.modifyHttpRequest(new Context(request), executionAttributes); - } - - @Test - public void outpostArnWithDualstackEnabled_shouldThrowException() { - thrown.expect(IllegalArgumentException.class); - thrown.expectMessage("Dual stack"); - - EndpointAddressInterceptor interceptor = new EndpointAddressInterceptor(); - Arn arn = Arn.fromString("arn:aws:s3-outposts:us-east-1:123456789012:outpost:op-01234567890123456:bucket:mybucket"); - executionAttributes.putAttribute(S3_ARNABLE_FIELD, S3ArnableField.builder().arn(arn).build()); - executionAttributes.putAttribute(SERVICE_CONFIG, enableDualstack()); - interceptor.modifyHttpRequest(new Context(request), executionAttributes); - } - - private S3ControlConfiguration enableDualstack() { - return S3ControlConfiguration.builder() - .dualstackEnabled(true) - .build(); - } - - private S3ControlConfiguration enableFips() { - return S3ControlConfiguration.builder() - .fipsModeEnabled(true) - .build(); - } - - public final class Context implements software.amazon.awssdk.core.interceptor.Context.ModifyHttpRequest { - - private final SdkHttpRequest request; - private SdkRequest sdkRequest = 
CreateBucketRequest.builder().build(); - - public Context(SdkHttpRequest request) { - this.request = request; - } - - public Context request(SdkRequest sdkRequest) { - this.sdkRequest = sdkRequest; - return this; - } - - @Override - public SdkRequest request() { - return sdkRequest; - } - - @Override - public SdkHttpRequest httpRequest() { - return request; - } - - @Override - public Optional requestBody() { - return Optional.empty(); - } - - @Override - public Optional asyncRequestBody() { - return Optional.empty(); - } - } -} diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index 1bf58c4535e..76d4e9c7713 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index 5a015027b4f..46f117eebfa 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index ab6de9858fc..8828edabe98 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -279,7 +279,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

    Creates an endpoint using the endpoint configuration specified in the request. SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

    Use this API to deploy models using SageMaker hosting services.

    For an example that calls this method when deploying a model to SageMaker hosting services, see the Create Endpoint example notebook.

    You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

    The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.

    When it receives the request, SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

    When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

    When SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

    If any of the models hosted at this endpoint get model data from an Amazon S3 location, SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your Amazon Web Services account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

    To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.

    • Option 1: For a full SageMaker access, search and attach the AmazonSageMakerFullAccess policy.

    • Option 2: For granting a limited access to an IAM role, paste the following Action elements manually into the JSON file of the IAM role:

      \"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"]

      \"Resource\": [

      \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"

      \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"

      ]

      For more information, see SageMaker API Permissions: Actions, Permissions, and Resources Reference.

    " + "documentation":"

    Creates an endpoint using the endpoint configuration specified in the request. SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

    Use this API to deploy models using SageMaker hosting services.

    You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

    The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.

    When it receives the request, SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

    When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

    When SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

    If any of the models hosted at this endpoint get model data from an Amazon S3 location, SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services STS is activated in your Amazon Web Services account by default. If you previously deactivated Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information, see Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services Identity and Access Management User Guide.

    To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to the role.

    • Option 1: For a full SageMaker access, search and attach the AmazonSageMakerFullAccess policy.

    • Option 2: For granting a limited access to an IAM role, paste the following Action elements manually into the JSON file of the IAM role:

      \"Action\": [\"sagemaker:CreateEndpoint\", \"sagemaker:CreateEndpointConfig\"]

      \"Resource\": [

      \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"

      \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"

      ]

      For more information, see SageMaker API Permissions: Actions, Permissions, and Resources Reference.

    " }, "CreateEndpointConfig":{ "name":"CreateEndpointConfig", @@ -8672,7 +8672,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

    A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

    " + "documentation":"

    A list of key value pairs associated with the model. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

    If you supply ModelPackageGroupName, your model package belongs to the model group you specify and uses the tags associated with the model group. In this case, you cannot supply a tag argument.

    " }, "ModelApprovalStatus":{ "shape":"ModelApprovalStatus", @@ -29167,6 +29167,10 @@ "SupportedEndpointType":{ "shape":"RecommendationJobSupportedEndpointType", "documentation":"

    The endpoint type to receive recommendations for. By default this is null, and the results of the inference recommendation job return a combined list of both real-time and serverless benchmarks. By specifying a value for this field, you can receive a longer list of benchmarks for the desired endpoint type.

    " + }, + "SupportedResponseMIMETypes":{ + "shape":"RecommendationJobSupportedResponseMIMETypes", + "documentation":"

    The supported MIME types for the output data.

    " } }, "documentation":"

    Specifies mandatory fields for running an Inference Recommender job directly in the CreateInferenceRecommendationsJob API. The fields specified in ContainerConfig override the corresponding fields in the model package. Use ContainerConfig if you want to specify these fields for the recommendation job but don't want to edit them in your model package.

    " @@ -29342,6 +29346,15 @@ "type":"list", "member":{"shape":"String"} }, + "RecommendationJobSupportedResponseMIMEType":{ + "type":"string", + "max":1024, + "pattern":"^[-\\w]+\\/.+$" + }, + "RecommendationJobSupportedResponseMIMETypes":{ + "type":"list", + "member":{"shape":"RecommendationJobSupportedResponseMIMEType"} + }, "RecommendationJobType":{ "type":"string", "enum":[ diff --git a/services/sagemakera2iruntime/pom.xml b/services/sagemakera2iruntime/pom.xml index b989ee0caeb..1ff27e38e4b 100644 --- a/services/sagemakera2iruntime/pom.xml +++ b/services/sagemakera2iruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index e5be558fa38..192be7deebc 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index aee2c052add..cf6daf336fe 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature Store Runtime diff --git a/services/sagemakergeospatial/pom.xml b/services/sagemakergeospatial/pom.xml index 8dc297b4967..0906039ad04 100644 --- a/services/sagemakergeospatial/pom.xml +++ b/services/sagemakergeospatial/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sagemakergeospatial AWS Java SDK :: Services :: Sage Maker Geospatial diff --git a/services/sagemakermetrics/pom.xml b/services/sagemakermetrics/pom.xml index 
ae3b905268f..7b493e381f7 100644 --- a/services/sagemakermetrics/pom.xml +++ b/services/sagemakermetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sagemakermetrics AWS Java SDK :: Services :: Sage Maker Metrics diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index 9805741fb18..ec1540e884c 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index 061a8bf1f80..e9114b474e4 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/scheduler/pom.xml b/services/scheduler/pom.xml index f7cc8545bbe..5e950fd6477 100644 --- a/services/scheduler/pom.xml +++ b/services/scheduler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT scheduler AWS Java SDK :: Services :: Scheduler diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index 08aa38e4384..ec1ebf06572 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/secretsmanager/pom.xml b/services/secretsmanager/pom.xml index 7fad7482eef..3a4813a60b4 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index 
049d03dce9b..a8a151a4172 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/customization.config b/services/securityhub/src/main/resources/codegen-resources/customization.config index 045846615ab..4f8246fb980 100644 --- a/services/securityhub/src/main/resources/codegen-resources/customization.config +++ b/services/securityhub/src/main/resources/codegen-resources/customization.config @@ -9,7 +9,7 @@ "listInvitations", "listMembers" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "getEnabledStandards", "getInsights" ] diff --git a/services/securityhub/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/securityhub/src/main/resources/codegen-resources/endpoint-rule-set.json index bcaddb83461..659462cfddb 100644 --- a/services/securityhub/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/securityhub/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + 
"conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://securityhub-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://securityhub-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, 
but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://securityhub-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://securityhub-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://securityhub.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://securityhub.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://securityhub.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://securityhub.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json b/services/securityhub/src/main/resources/codegen-resources/service-2.json index db6338f2b77..3e32c2511bc 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json @@ -396,7 +396,7 @@ {"shape":"LimitExceededException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Deletes the specified member accounts from Security Hub.

    Can be used to delete member accounts that belong to an organization as well as member accounts that were invited manually.

    " + "documentation":"

    Deletes the specified member accounts from Security Hub.

    You can invoke this API only to delete accounts that became members through invitation. You can't invoke this API to delete accounts that belong to an Organizations organization.

    " }, "DescribeActionTargets":{ "name":"DescribeActionTargets", @@ -541,7 +541,7 @@ {"shape":"InvalidAccessException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Disables Security Hub in your account only in the current Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub.

    When you disable Security Hub for an administrator account, it doesn't disable Security Hub for any associated member accounts.

    When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and cannot be recovered. Any standards that were enabled are disabled, and your administrator and member account associations are removed.

    If you want to save your existing findings, you must export them before you disable Security Hub.

    " + "documentation":"

    Disables Security Hub in your account only in the current Amazon Web Services Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub.

    You can't disable Security Hub in an account that is currently the Security Hub administrator.

    When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and cannot be recovered. Any standards that were enabled are disabled, and your administrator and member account associations are removed.

    If you want to save your existing findings, you must export them before you disable Security Hub.

    " }, "DisassociateFromAdministratorAccount":{ "name":"DisassociateFromAdministratorAccount", @@ -12768,6 +12768,10 @@ "Sample":{ "shape":"Boolean", "documentation":"

    Indicates whether the finding is a sample finding.

    " + }, + "GeneratorDetails":{ + "shape":"GeneratorDetails", + "documentation":"

    Provides metadata for the Amazon CodeGuru detector associated with a finding. This field pertains to findings that relate to Lambda functions. Amazon Inspector identifies policy violations and vulnerabilities in Lambda function code based on internal detectors developed in collaboration with Amazon CodeGuru. Security Hub receives those findings.

    " } }, "documentation":"

    Provides a consistent format for Security Hub findings. AwsSecurityFinding format allows you to share findings between Amazon Web Services security services and third-party solutions.

    A finding is a potential security issue generated either by Amazon Web Services services or by the integrated third-party solutions and standards checks.

    " @@ -14629,6 +14633,28 @@ }, "documentation":"

    Provides details about the current status of the sensitive data detection.

    " }, + "CodeVulnerabilitiesFilePath":{ + "type":"structure", + "members":{ + "EndLine":{ + "shape":"Integer", + "documentation":"

    The line number of the last line of code in which the vulnerability is located.

    " + }, + "FileName":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the file in which the code vulnerability is located.

    " + }, + "FilePath":{ + "shape":"NonEmptyString", + "documentation":"

    The file path to the code in which the vulnerability is located.

    " + }, + "StartLine":{ + "shape":"Integer", + "documentation":"

    The line number of the first line of code in which the vulnerability is located.

    " + } + }, + "documentation":"

    Provides details about where a code vulnerability is located in your Lambda function.

    " + }, "Compliance":{ "type":"structure", "members":{ @@ -15720,6 +15746,24 @@ "type":"list", "member":{"shape":"FirewallPolicyStatelessRuleGroupReferencesDetails"} }, + "GeneratorDetails":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"NonEmptyString", + "documentation":"

    The name of the detector used to identify the code vulnerability.

    " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

    The description of the detector used to identify the code vulnerability.

    " + }, + "Labels":{ + "shape":"TypeList", + "documentation":"

    An array of tags used to identify the detector associated with the finding.

    " + } + }, + "documentation":"

    Provides metadata for the Amazon CodeGuru detector associated with a finding. This field pertains to findings that relate to Lambda functions. Amazon Inspector identifies policy violations and vulnerabilities in Lambda function code based on internal detectors developed in collaboration with Amazon CodeGuru. Security Hub receives those findings.

    " + }, "GeoLocation":{ "type":"structure", "members":{ @@ -19497,10 +19541,51 @@ "FixAvailable":{ "shape":"VulnerabilityFixAvailable", "documentation":"

    Specifies if all vulnerable packages in a finding have a value for FixedInVersion and Remediation. This field is evaluated for each vulnerability Id based on the number of vulnerable packages that have a value for both FixedInVersion and Remediation. Valid values are as follows:

    • YES if all vulnerable packages have a value for both FixedInVersion and Remediation

    • NO if no vulnerable packages have a value for FixedInVersion and Remediation

    • PARTIAL otherwise

    " + }, + "EpssScore":{ + "shape":"Double", + "documentation":"

    The Exploit Prediction Scoring System (EPSS) score for a finding.

    " + }, + "ExploitAvailable":{ + "shape":"VulnerabilityExploitAvailable", + "documentation":"

    Whether an exploit is available for a finding.

    " + }, + "CodeVulnerabilities":{ + "shape":"VulnerabilityCodeVulnerabilitiesList", + "documentation":"

    The vulnerabilities found in your Lambda function code. This field pertains to findings that Security Hub receives from Amazon Inspector.

    " } }, "documentation":"

    A vulnerability associated with a finding.

    " }, + "VulnerabilityCodeVulnerabilities":{ + "type":"structure", + "members":{ + "Cwes":{ + "shape":"TypeList", + "documentation":"

    The Common Weakness Enumeration (CWE) item associated with the detected code vulnerability.

    " + }, + "FilePath":{ + "shape":"CodeVulnerabilitiesFilePath", + "documentation":"

    Provides details about where a code vulnerability is located in your Lambda function.

    " + }, + "SourceArn":{ + "shape":"NonEmptyString", + "documentation":"

    The Amazon Resource Name (ARN) of the Lambda layer in which the code vulnerability is located.

    " + } + }, + "documentation":"

    Provides details about the vulnerabilities found in your Lambda function code. This field pertains to findings that Security Hub receives from Amazon Inspector.

    " + }, + "VulnerabilityCodeVulnerabilitiesList":{ + "type":"list", + "member":{"shape":"VulnerabilityCodeVulnerabilities"} + }, + "VulnerabilityExploitAvailable":{ + "type":"string", + "enum":[ + "YES", + "NO" + ] + }, "VulnerabilityFixAvailable":{ "type":"string", "enum":[ @@ -19582,7 +19667,7 @@ "documentation":"

    The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue.

    The allowed values are the following.

    • NEW - The initial state of a finding, before it is reviewed.

      Security Hub also resets the workflow status from NOTIFIED or RESOLVED to NEW in the following cases:

      • RecordState changes from ARCHIVED to ACTIVE.

      • ComplianceStatus changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE.

    • NOTIFIED - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.

    • SUPPRESSED - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated.

    • RESOLVED - The finding was reviewed and remediated and is now considered resolved.

    " } }, - "documentation":"

    Provides information about the status of the investigation into a finding.

    " + "documentation":"

    Provides details about the status of the investigation into a finding.

    " }, "WorkflowState":{ "type":"string", @@ -19616,5 +19701,5 @@ "documentation":"

    Used to update information about the investigation into the finding.

    " } }, - "documentation":"

    Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from Amazon Web Services accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the Security HubUser Guide.

    When you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, run the same command for each Region in which you want to apply the change.

    For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

    The following throttling limits apply to using Security Hub API operations.

    • BatchEnableStandards - RateLimit of 1 request per second. BurstLimit of 1 request per second.

    • GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second.

    • BatchImportFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    • BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    • UpdateStandardsControl - RateLimit of 1 request per second. BurstLimit of 5 requests per second.

    • All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    " + "documentation":"

    Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from Amazon Web Services accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the Security Hub User Guide .

    When you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, run the same command for each Region in which you want to apply the change.

    For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

    The following throttling limits apply to using Security Hub API operations.

    • BatchEnableStandards - RateLimit of 1 request per second. BurstLimit of 1 request per second.

    • GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second.

    • BatchImportFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    • BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    • UpdateStandardsControl - RateLimit of 1 request per second. BurstLimit of 5 requests per second.

    • All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second.

    " } diff --git a/services/securitylake/pom.xml b/services/securitylake/pom.xml index 5653d935b80..a7440eafcad 100644 --- a/services/securitylake/pom.xml +++ b/services/securitylake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT securitylake AWS Java SDK :: Services :: Security Lake diff --git a/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json b/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json index 44e4b725ce1..35e974c2b0f 100644 --- a/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/securitylake/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,55 +1,55 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://securitylake-fips.us-gov-east-1.api.aws" + "url": "https://securitylake-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake-fips.us-gov-east-1.amazonaws.com" + "url": "https://securitylake-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://securitylake.us-gov-east-1.api.aws" + "url": "https://securitylake.us-east-1.api.aws" } }, "params": { - 
"UseDualStack": true, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake.us-gov-east-1.amazonaws.com" + "url": "https://securitylake.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": true, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { - "UseDualStack": true, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,109 +99,109 @@ } }, "params": { - "UseDualStack": false, + "Region": "cn-north-1", "UseFIPS": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://securitylake-fips.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake-fips.us-iso-east-1.c2s.ic.gov" + "url": 
"https://securitylake-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://securitylake.us-gov-east-1.api.aws" + } }, "params": { - "UseDualStack": true, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake.us-iso-east-1.c2s.ic.gov" + "url": "https://securitylake.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://securitylake-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake-fips.us-east-1.amazonaws.com" + "url": "https://securitylake-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - 
"UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://securitylake.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://securitylake.us-east-1.amazonaws.com" + "url": "https://securitylake.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": false, + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": true, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": true, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -247,22 +247,35 @@ } }, "params": { - "UseDualStack": false, + "Region": "us-isob-east-1", "UseFIPS": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": 
"For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { "UseFIPS": false, - "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -272,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/securitylake/src/main/resources/codegen-resources/service-2.json b/services/securitylake/src/main/resources/codegen-resources/service-2.json index fd01ceeef35..2aadfa9cc22 100644 --- a/services/securitylake/src/main/resources/codegen-resources/service-2.json +++ b/services/securitylake/src/main/resources/codegen-resources/service-2.json @@ -1833,7 +1833,8 @@ }, "NextToken":{ "type":"string", - "pattern":"^[\\\\\\w\\-_:/.@=+]*$" + "max":2048, + "min":0 }, "NotificationConfiguration":{ "type":"structure", diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index 6c3d2532f72..c237529a08d 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ 
b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config b/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config index fece2bf044f..2fb45a9d3f0 100644 --- a/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config +++ b/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,4 @@ { - "blacklistedSimpleMethods" : ["createApplication"], + "excludedSimpleMethods" : ["createApplication"], "verifiedSimpleMethods" : ["listApplications"] } diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index cfefb45193e..bb4d32f1569 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index 5a4fa7ee125..2095039e90c 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index 34e9cd51d09..910b63c9493 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index b2d90fbba0c..e90eaf2e3c4 100644 
--- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/servicequotas/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/servicequotas/src/main/resources/codegen-resources/endpoint-rule-set.json index 05489b7fa44..df63941e748 100644 --- a/services/servicequotas/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/servicequotas/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -32,13 +32,12 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], "type": "tree", @@ -46,21 +45,69 @@ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "parseURL", + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], "type": "tree", @@ -75,13 +122,17 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { 
- "conditions": [], "type": "tree", "rules": [ { @@ -89,143 +140,86 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsFIPS" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, + ], + "type": "tree", + "rules": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] + "conditions": [], + "endpoint": { + "url": "https://servicequotas-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - } - ], - "type": "tree", - "rules": [ + }, { "conditions": [], - "endpoint": { - "url": "https://servicequotas-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - 
}, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] } ], "type": "tree", "rules": [ { - "conditions": [], + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], "type": "tree", "rules": [ { @@ -247,7 +241,7 @@ } ], "endpoint": { - "url": "https://servicequotas.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://servicequotas.{Region}.amazonaws.com", "properties": {}, "headers": {} }, @@ -263,78 +257,83 @@ "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ] }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://servicequotas.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, { "conditions": [], - "endpoint": { - "url": "https://servicequotas.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled 
but this partition does not support DualStack", + "type": "error" } ] }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://servicequotas.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://servicequotas.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/servicequotas/src/main/resources/codegen-resources/endpoint-tests.json b/services/servicequotas/src/main/resources/codegen-resources/endpoint-tests.json index a7b637a9d4d..c4b5fe6484a 100644 --- a/services/servicequotas/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/servicequotas/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,822 +1,29 @@ { "testCases": [ { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", 
- "expect": { - "endpoint": { - "url": "https://servicequotas.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - 
"url": "https://servicequotas.us-gov-east-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-gov-east-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.me-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.me-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://servicequotas-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ca-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ca-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ca-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-central-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-central-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-central-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": 
"https://servicequotas-fips.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.us-west-2.amazonaws.com" - } - }, - "params": { - 
"UseDualStack": false, - "Region": "us-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.af-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.af-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "af-south-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-north-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-north-1", - 
"UseFIPS": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-north-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-north-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-3", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": true - } - }, - { - "documentation": "For region 
eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-2", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": true - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.eu-west-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "eu-west-1", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - 
"expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-northeast-3.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-3", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-northeast-2.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "ap-northeast-2", - "UseFIPS": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS 
enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": true - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-northeast-1.amazonaws.com" + "url": "https://servicequotas.af-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-northeast-1", - "UseFIPS": true + "Region": "af-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-northeast-1.api.aws" + "url": "https://servicequotas.ap-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-northeast-1", - "UseFIPS": false + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -827,594 +34,547 @@ } }, "params": { - "UseDualStack": false, "Region": "ap-northeast-1", - "UseFIPS": false - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://servicequotas-fips.me-south-1.amazonaws.com" - } - }, - "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": true - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": 
{ - "url": "https://servicequotas.me-south-1.api.aws" - } - }, - "params": { - "UseDualStack": true, - "Region": "me-south-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.me-south-1.amazonaws.com" + "url": "https://servicequotas.ap-northeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "me-south-1", - "UseFIPS": false + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.sa-east-1.api.aws" + "url": "https://servicequotas.ap-northeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.sa-east-1.amazonaws.com" + "url": "https://servicequotas.ap-south-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": true + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.sa-east-1.api.aws" + "url": "https://servicequotas.ap-southeast-1.amazonaws.com" } }, "params": { - "UseDualStack": 
true, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.sa-east-1.amazonaws.com" + "url": "https://servicequotas.ap-southeast-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "sa-east-1", - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-east-1.api.aws" + "url": "https://servicequotas.ap-southeast-3.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "ap-southeast-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-east-1.amazonaws.com" + "url": "https://servicequotas.ca-central-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": true + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-east-1.api.aws" + "url": "https://servicequotas.eu-central-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-east-1", - "UseFIPS": false + "Region": 
"eu-central-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-east-1.amazonaws.com" + "url": "https://servicequotas.eu-north-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-east-1", - "UseFIPS": false + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://servicequotas.eu-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.cn-north-1.amazonaws.com.cn" + "url": "https://servicequotas.eu-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://servicequotas.eu-west-2.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } 
}, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.cn-north-1.amazonaws.com.cn" + "url": "https://servicequotas.eu-west-3.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "cn-north-1", - "UseFIPS": false + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.us-gov-west-1.api.aws" + "url": "https://servicequotas.me-south-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-gov-west-1.amazonaws.com" + "url": "https://servicequotas.sa-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": true + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-gov-west-1.api.aws" + "url": "https://servicequotas.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and 
DualStack disabled", + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-gov-west-1.amazonaws.com" + "url": "https://servicequotas.us-east-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "us-gov-west-1", - "UseFIPS": false + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-1.api.aws" + "url": "https://servicequotas.us-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-1.amazonaws.com" + "url": "https://servicequotas.us-west-2.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": true + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-1.api.aws" + "url": "https://servicequotas-fips.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with 
FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-1.amazonaws.com" + "url": "https://servicequotas-fips.us-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-1", - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-2.api.aws" + "url": "https://servicequotas.us-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-2.amazonaws.com" + "url": "https://servicequotas-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-2.api.aws" + "url": "https://servicequotas-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", 
"expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-2.amazonaws.com" + "url": "https://servicequotas.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-2", - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-3.api.aws" + "url": "https://servicequotas.cn-north-1.amazonaws.com.cn" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.ap-southeast-3.amazonaws.com" + "url": "https://servicequotas.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.ap-southeast-3.api.aws" + "url": "https://servicequotas.us-gov-east-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { 
"endpoint": { - "url": "https://servicequotas.ap-southeast-3.amazonaws.com" + "url": "https://servicequotas.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": false, - "Region": "ap-southeast-3", - "UseFIPS": false + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.us-east-1.api.aws" + "url": "https://servicequotas.us-gov-west-1.amazonaws.com" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.us-east-1.amazonaws.com" + "url": "https://servicequotas-fips.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-east-1.api.aws" + "url": "https://servicequotas.us-gov-east-1.api.aws" } }, "params": { - "UseDualStack": true, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://servicequotas.us-east-1.amazonaws.com" - } + 
"error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-1", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.us-east-2.api.aws" + "url": "https://servicequotas-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://servicequotas-fips.us-east-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.us-east-2.api.aws" + "url": "https://servicequotas.us-iso-east-1.c2s.ic.gov" } }, "params": { - "UseDualStack": true, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://servicequotas.us-east-2.amazonaws.com" - } + "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseDualStack": false, - "Region": "us-east-2", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas-fips.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://servicequotas-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://servicequotas-fips.cn-northwest-1.amazonaws.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://servicequotas.cn-northwest-1.api.amazonwebservices.com.cn" + "url": "https://servicequotas.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "UseDualStack": true, - "Region": "cn-northwest-1", - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { 
"endpoint": { - "url": "https://servicequotas.cn-northwest-1.amazonaws.com.cn" + "url": "https://example.com" } }, "params": { + "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "Region": "cn-northwest-1", - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseDualStack": false, - "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1424,9 +584,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseDualStack": false, "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1436,11 +596,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseDualStack": true, "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/servicequotas/src/main/resources/codegen-resources/service-2.json b/services/servicequotas/src/main/resources/codegen-resources/service-2.json index 073012f5c3c..5bcc52fae10 100644 --- a/services/servicequotas/src/main/resources/codegen-resources/service-2.json +++ b/services/servicequotas/src/main/resources/codegen-resources/service-2.json @@ -30,7 +30,7 @@ {"shape":"TemplatesNotAvailableInRegionException"}, {"shape":"NoAvailableOrganizationException"} ], - "documentation":"

    Associates your quota request template with your organization. When a new account is created in your organization, the quota increase requests in the template are automatically applied to the account. You can add a quota increase request for any adjustable quota to your template.

    " + "documentation":"

    Associates your quota request template with your organization. When a new Amazon Web Services account is created in your organization, the quota increase requests in the template are automatically applied to the account. You can add a quota increase request for any adjustable quota to your template.

    " }, "DeleteServiceQuotaIncreaseRequestFromTemplate":{ "name":"DeleteServiceQuotaIncreaseRequestFromTemplate", @@ -71,7 +71,7 @@ {"shape":"TemplatesNotAvailableInRegionException"}, {"shape":"NoAvailableOrganizationException"} ], - "documentation":"

    Disables your quota request template. After a template is disabled, the quota increase requests in the template are not applied to new accounts in your organization. Disabling a quota request template does not apply its quota increase requests.

    " + "documentation":"

    Disables your quota request template. After a template is disabled, the quota increase requests in the template are not applied to new Amazon Web Services accounts in your organization. Disabling a quota request template does not apply its quota increase requests.

    " }, "GetAWSDefaultServiceQuota":{ "name":"GetAWSDefaultServiceQuota", @@ -181,7 +181,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Lists the default values for the quotas for the specified AWS service. A default value does not reflect any quota increases.

    " + "documentation":"

    Lists the default values for the quotas for the specified Amazon Web Service. A default value does not reflect any quota increases.

    " }, "ListRequestedServiceQuotaChangeHistory":{ "name":"ListRequestedServiceQuotaChangeHistory", @@ -199,7 +199,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Retrieves the quota increase requests for the specified service.

    " + "documentation":"

    Retrieves the quota increase requests for the specified Amazon Web Service.

    " }, "ListRequestedServiceQuotaChangeHistoryByQuota":{ "name":"ListRequestedServiceQuotaChangeHistoryByQuota", @@ -255,7 +255,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Lists the applied quota values for the specified AWS service. For some quotas, only the default values are available. If the applied quota value is not available for a quota, the quota is not retrieved.

    " + "documentation":"

    Lists the applied quota values for the specified Amazon Web Service. For some quotas, only the default values are available. If the applied quota value is not available for a quota, the quota is not retrieved.

    " }, "ListServices":{ "name":"ListServices", @@ -272,7 +272,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

    Lists the names and codes for the services integrated with Service Quotas.

    " + "documentation":"

    Lists the names and codes for the Amazon Web Services integrated with Service Quotas.

    " }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -394,6 +394,14 @@ "min":1, "pattern":"arn:aws(-[\\w]+)*:*:.+:[0-9]{12}:.+" }, + "AppliedLevelEnum":{ + "type":"string", + "enum":[ + "ACCOUNT", + "RESOURCE", + "ALL" + ] + }, "AssociateServiceQuotaTemplateRequest":{ "type":"structure", "members":{ @@ -422,15 +430,15 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    Specifies the Amazon Web Services Region for which the request was made.

    " } } }, @@ -472,7 +480,7 @@ "members":{ "ErrorCode":{ "shape":"ErrorCode", - "documentation":"

    Service Quotas returns the following error values:

    • DEPENDENCY_ACCESS_DENIED_ERROR - The caller does not have the required permissions to complete the action. To resolve the error, you must have permission to access the service or quota.

    • DEPENDENCY_THROTTLING_ERROR - The service is throttling Service Quotas.

    • DEPENDENCY_SERVICE_ERROR - The service is not available.

    • SERVICE_QUOTA_NOT_AVAILABLE_ERROR - There was an error in Service Quotas.

    " + "documentation":"

    Service Quotas returns the following error values:

    • DEPENDENCY_ACCESS_DENIED_ERROR - The caller does not have the required permissions to complete the action. To resolve the error, you must have permission to access the Amazon Web Service or quota.

    • DEPENDENCY_THROTTLING_ERROR - The Amazon Web Service is throttling Service Quotas.

    • DEPENDENCY_SERVICE_ERROR - The Amazon Web Service is not available.

    • SERVICE_QUOTA_NOT_AVAILABLE_ERROR - There was an error in Service Quotas.

    " }, "ErrorMessage":{ "shape":"ErrorMessage", @@ -491,11 +499,11 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " } } }, @@ -518,7 +526,7 @@ "members":{ "ServiceQuotaTemplateAssociationStatus":{ "shape":"ServiceQuotaTemplateAssociationStatus", - "documentation":"

    The association status. If the status is ASSOCIATED, the quota increase requests in the template are automatically applied to new accounts in your organization.

    " + "documentation":"

    The association status. If the status is ASSOCIATED, the quota increase requests in the template are automatically applied to new Amazon Web Services accounts in your organization.

    " } } }, @@ -528,7 +536,7 @@ "members":{ "RequestId":{ "shape":"RequestId", - "documentation":"

    The ID of the quota increase request.

    " + "documentation":"

    Specifies the ID of the quota increase request.

    " } } }, @@ -551,15 +559,15 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    Specifies the Amazon Web Services Region for which you made the request.

    " } } }, @@ -581,11 +589,15 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " + }, + "ContextId":{ + "shape":"QuotaContextId", + "documentation":"

    Specifies the Amazon Web Services account or resource to which the quota applies. The value in this field depends on the context scope associated with the specified service quota.

    " } } }, @@ -638,15 +650,15 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " } } }, @@ -655,7 +667,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "Quotas":{ "shape":"ServiceQuotaListDefinition", @@ -672,23 +684,27 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "Status":{ "shape":"RequestStatus", - "documentation":"

    The status value of the quota increase request.

    " + "documentation":"

    Specifies that you want to filter the results to only the requests with the matching status.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + }, + "QuotaRequestedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies at which level within the Amazon Web Services account the quota request applies to.

    " } } }, @@ -697,7 +713,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "RequestedQuotas":{ "shape":"RequestedServiceQuotaChangeHistoryListDefinition", @@ -710,19 +726,23 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "Status":{ "shape":"RequestStatus", - "documentation":"

    The status of the quota increase request.

    " + "documentation":"

    Specifies that you want to filter the results to only the requests with the matching status.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + }, + "QuotaRequestedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies at which level within the Amazon Web Services account the quota request applies to.

    " } } }, @@ -731,7 +751,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "RequestedQuotas":{ "shape":"RequestedServiceQuotaChangeHistoryListDefinition", @@ -744,19 +764,19 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    Specifies the Amazon Web Services Region for which you made the request.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " } } }, @@ -769,7 +789,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " } } }, @@ -779,15 +799,23 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + }, + "QuotaCode":{ + "shape":"QuotaCode", + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " + }, + "QuotaAppliedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies at which level of granularity that the quota value is applied.

    " } } }, @@ -796,7 +824,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "Quotas":{ "shape":"ServiceQuotaListDefinition", @@ -809,11 +837,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token for the next page of results.

    " + "documentation":"

    Specifies a value for receiving additional results after you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from.

    " }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of results to return with a single call. To retrieve the remaining results, if any, make another call with the token returned from this call.

    " + "documentation":"

    Specifies the maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results.

    An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " } } }, @@ -822,11 +850,11 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    " + "documentation":"

    If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null.

    " }, "Services":{ "shape":"ServiceInfoListDefinition", - "documentation":"

    Information about the services.

    " + "documentation":"

    The list of the Amazon Web Service names and service codes.

    " } } }, @@ -836,7 +864,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) for the applied quota for which you want to list tags. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas AWS CLI command or the ListServiceQuotas AWS API operation.

    " + "documentation":"

    The Amazon Resource Name (ARN) for the applied quota for which you want to list tags. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas CLI command or the ListServiceQuotas Amazon Web Services API operation.

    " } } }, @@ -894,7 +922,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The account making this call is not a member of an organization.

    ", + "documentation":"

    The Amazon Web Services account making this call is not a member of an organization.

    ", "exception":true }, "NoSuchResourceException":{ @@ -910,7 +938,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The organization that your account belongs to is not in All Features mode.

    ", + "documentation":"

    The organization that your Amazon Web Services account belongs to is not in All Features mode.

    ", "exception":true }, "OutputTags":{ @@ -942,19 +970,19 @@ "members":{ "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    Specifies the Amazon Web Services Region to which the template applies.

    " }, "DesiredValue":{ "shape":"QuotaValue", - "documentation":"

    The new, increased value for the quota.

    " + "documentation":"

    Specifies the new, increased value for the quota.

    " } } }, @@ -975,6 +1003,33 @@ "min":1, "pattern":"[a-zA-Z][a-zA-Z0-9-]{1,128}" }, + "QuotaContextId":{"type":"string"}, + "QuotaContextInfo":{ + "type":"structure", + "members":{ + "ContextScope":{ + "shape":"QuotaContextScope", + "documentation":"

    Specifies whether the quota applies to an Amazon Web Services account, or to a resource.

    " + }, + "ContextScopeType":{ + "shape":"QuotaContextScopeType", + "documentation":"

    When the ContextScope is RESOURCE, then this specifies the resource type of the specified resource.

    " + }, + "ContextId":{ + "shape":"QuotaContextId", + "documentation":"

    Specifies the Amazon Web Services account or resource to which the quota applies. The value in this field depends on the context scope associated with the specified service quota.

    " + } + }, + "documentation":"

    A structure that describes the context for a service quota. The context identifies what the quota applies to.

    " + }, + "QuotaContextScope":{ + "type":"string", + "enum":[ + "RESOURCE", + "ACCOUNT" + ] + }, + "QuotaContextScopeType":{"type":"string"}, "QuotaExceededException":{ "type":"structure", "members":{ @@ -991,7 +1046,7 @@ "members":{ "PeriodValue":{ "shape":"PeriodValue", - "documentation":"

    The value.

    " + "documentation":"

    The value associated with the reported PeriodUnit.

    " }, "PeriodUnit":{ "shape":"PeriodUnit", @@ -1022,15 +1077,19 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "DesiredValue":{ "shape":"QuotaValue", - "documentation":"

    The new, increased value for the quota.

    " + "documentation":"

    Specifies the new, increased value for the quota.

    " + }, + "ContextId":{ + "shape":"QuotaContextId", + "documentation":"

    Specifies the Amazon Web Services account or resource to which the quota applies. The value in this field depends on the context scope associated with the specified service quota.

    " } } }, @@ -1050,7 +1109,9 @@ "CASE_OPENED", "APPROVED", "DENIED", - "CASE_CLOSED" + "CASE_CLOSED", + "NOT_APPROVED", + "INVALID_REQUEST" ] }, "RequestedServiceQuotaChange":{ @@ -1066,19 +1127,19 @@ }, "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "ServiceName":{ "shape":"ServiceName", - "documentation":"

    The service name.

    " + "documentation":"

    Specifies the service name.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "QuotaName":{ "shape":"QuotaName", - "documentation":"

    The quota name.

    " + "documentation":"

    Specifies the quota name.

    " }, "DesiredValue":{ "shape":"QuotaValue", @@ -1111,6 +1172,14 @@ "Unit":{ "shape":"QuotaUnit", "documentation":"

    The unit of measurement.

    " + }, + "QuotaRequestedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies at which level within the Amazon Web Services account the quota request applies.

    " + }, + "QuotaContext":{ + "shape":"QuotaContextInfo", + "documentation":"

    The context for this service quota.

    " } }, "documentation":"

    Information about a quota increase request.

    " @@ -1148,14 +1217,14 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "ServiceName":{ "shape":"ServiceName", - "documentation":"

    The service name.

    " + "documentation":"

    Specifies the service name.

    " } }, - "documentation":"

    Information about a service.

    " + "documentation":"

    Information about an Amazon Web Service.

    " }, "ServiceInfoListDefinition":{ "type":"list", @@ -1167,11 +1236,11 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "ServiceName":{ "shape":"ServiceName", - "documentation":"

    The service name.

    " + "documentation":"

    Specifies the service name.

    " }, "QuotaArn":{ "shape":"QuotaArn", @@ -1179,11 +1248,11 @@ }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "QuotaName":{ "shape":"QuotaName", - "documentation":"

    The quota name.

    " + "documentation":"

    Specifies the quota name.

    " }, "Value":{ "shape":"QuotaValue", @@ -1212,6 +1281,14 @@ "ErrorReason":{ "shape":"ErrorReason", "documentation":"

    The error code and error reason.

    " + }, + "QuotaAppliedAtLevel":{ + "shape":"AppliedLevelEnum", + "documentation":"

    Specifies at which level of granularity the quota value is applied.

    " + }, + "QuotaContext":{ + "shape":"QuotaContextInfo", + "documentation":"

    The context for this service quota.

    " } }, "documentation":"

    Information about a quota.

    " @@ -1221,19 +1298,19 @@ "members":{ "ServiceCode":{ "shape":"ServiceCode", - "documentation":"

    The service identifier.

    " + "documentation":"

    Specifies the service identifier. To find the service code value for an Amazon Web Services service, use the ListServices operation.

    " }, "ServiceName":{ "shape":"ServiceName", - "documentation":"

    The service name.

    " + "documentation":"

    Specifies the service name.

    " }, "QuotaCode":{ "shape":"QuotaCode", - "documentation":"

    The quota identifier.

    " + "documentation":"

    Specifies the quota identifier. To find the quota code for a specific quota, use the ListServiceQuotas operation, and look for the QuotaCode response in the output for the quota you want.

    " }, "QuotaName":{ "shape":"QuotaName", - "documentation":"

    The quota name.

    " + "documentation":"

    Specifies the quota name.

    " }, "DesiredValue":{ "shape":"QuotaValue", @@ -1241,7 +1318,7 @@ }, "AwsRegion":{ "shape":"AwsRegion", - "documentation":"

    The AWS Region.

    " + "documentation":"

    The Amazon Web Services Region.

    " }, "Unit":{ "shape":"QuotaUnit", @@ -1324,7 +1401,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) for the applied quota. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas AWS CLI command or the ListServiceQuotas AWS API operation.

    " + "documentation":"

    The Amazon Resource Name (ARN) for the applied quota. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas CLI command or the ListServiceQuotas Amazon Web Services API operation.

    " }, "Tags":{ "shape":"InputTags", @@ -1348,7 +1425,7 @@ "members":{ "Message":{"shape":"ExceptionMessage"} }, - "documentation":"

    The Service Quotas template is not available in this AWS Region.

    ", + "documentation":"

    The Service Quotas template is not available in this Amazon Web Services Region.

    ", "exception":true }, "TooManyRequestsException":{ @@ -1376,7 +1453,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) for the applied quota that you want to untag. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas AWS CLI command or the ListServiceQuotas AWS API operation.

    " + "documentation":"

    The Amazon Resource Name (ARN) for the applied quota that you want to untag. You can get this information by using the Service Quotas console, or by listing the quotas using the list-service-quotas CLI command or the ListServiceQuotas Amazon Web Services API operation.

    " }, "TagKeys":{ "shape":"InputTagKeys", @@ -1390,5 +1467,5 @@ } } }, - "documentation":"

    With Service Quotas, you can view and manage your quotas easily as your AWS workloads grow. Quotas, also referred to as limits, are the maximum number of resources that you can create in your AWS account. For more information, see the Service Quotas User Guide.

    " + "documentation":"

    With Service Quotas, you can view and manage your quotas easily as your Amazon Web Services workloads grow. Quotas, also referred to as limits, are the maximum number of resources that you can create in your Amazon Web Services account. For more information, see the Service Quotas User Guide.

    " } diff --git a/services/ses/pom.xml b/services/ses/pom.xml index ea8beadd35e..d00f2a7938a 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/ses/src/main/resources/codegen-resources/customization.config b/services/ses/src/main/resources/codegen-resources/customization.config index 9c5a65cc95b..232f5a35cdb 100644 --- a/services/ses/src/main/resources/codegen-resources/customization.config +++ b/services/ses/src/main/resources/codegen-resources/customization.config @@ -13,7 +13,7 @@ "listTemplates", "listVerifiedEmailAddresses" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "updateAccountSendingEnabled" ] } diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index e3ac772ae37..71547111054 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sesv2/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/sesv2/src/main/resources/codegen-resources/endpoint-rule-set.json index 1d567c5f974..900cfc973ee 100644 --- a/services/sesv2/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/sesv2/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + 
], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://email-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not 
support one or both", - "type": "error" + "endpoint": { + "url": "https://email-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://email-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://email-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": 
"PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://email.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://email.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://email.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://email.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/services/sesv2/src/main/resources/codegen-resources/paginators-1.json b/services/sesv2/src/main/resources/codegen-resources/paginators-1.json index 188b42d8a1d..5b3c2d8c3ac 100644 --- a/services/sesv2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/sesv2/src/main/resources/codegen-resources/paginators-1.json @@ -50,6 +50,11 @@ "output_token": "NextToken", "limit_key": "PageSize" }, + "ListExportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, "ListImportJobs": { "input_token": "NextToken", "output_token": "NextToken", diff --git 
a/services/sesv2/src/main/resources/codegen-resources/service-2.json b/services/sesv2/src/main/resources/codegen-resources/service-2.json index 668c5a618ab..ee62c7b3fda 100644 --- a/services/sesv2/src/main/resources/codegen-resources/service-2.json +++ b/services/sesv2/src/main/resources/codegen-resources/service-2.json @@ -29,6 +29,21 @@ ], "documentation":"

    Retrieves batches of metric data collected based on your sending activity.

    You can execute this operation no more than 16 times per second, and with at most 160 queries from the batches per second (cumulative).

    " }, + "CancelExportJob":{ + "name":"CancelExportJob", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/export-jobs/{JobId}/cancel" + }, + "input":{"shape":"CancelExportJobRequest"}, + "output":{"shape":"CancelExportJobResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Cancels an export job.

    " + }, "CreateConfigurationSet":{ "name":"CreateConfigurationSet", "http":{ @@ -202,6 +217,22 @@ ], "documentation":"

    Creates an email template. Email templates enable you to send personalized email to one or more destinations in a single API operation. For more information, see the Amazon SES Developer Guide.

    You can execute this operation no more than once per second.

    " }, + "CreateExportJob":{ + "name":"CreateExportJob", + "http":{ + "method":"POST", + "requestUri":"/v2/email/export-jobs" + }, + "input":{"shape":"CreateExportJobRequest"}, + "output":{"shape":"CreateExportJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

    Creates an export job for a data source and destination.

    You can execute this operation no more than once per second.

    " + }, "CreateImportJob":{ "name":"CreateImportJob", "http":{ @@ -625,6 +656,21 @@ ], "documentation":"

    Displays the template object (which includes the subject line, HTML part and text part) for the template you specify.

    You can execute this operation no more than once per second.

    " }, + "GetExportJob":{ + "name":"GetExportJob", + "http":{ + "method":"GET", + "requestUri":"/v2/email/export-jobs/{JobId}" + }, + "input":{"shape":"GetExportJobRequest"}, + "output":{"shape":"GetExportJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

    Provides information about an export job.

    " + }, "GetImportJob":{ "name":"GetImportJob", "http":{ @@ -640,6 +686,21 @@ ], "documentation":"

    Provides information about an import job.

    " }, + "GetMessageInsights":{ + "name":"GetMessageInsights", + "http":{ + "method":"GET", + "requestUri":"/v2/email/insights/{MessageId}/" + }, + "input":{"shape":"GetMessageInsightsRequest"}, + "output":{"shape":"GetMessageInsightsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Provides information about a specific message, including the from address, the subject, the recipient address, email tags, as well as events associated with the message.

    You can execute this operation no more than once per second.

    " + }, "GetSuppressedDestination":{ "name":"GetSuppressedDestination", "http":{ @@ -784,6 +845,20 @@ ], "documentation":"

    Lists the email templates present in your Amazon SES account in the current Amazon Web Services Region.

    You can execute this operation no more than once per second.

    " }, + "ListExportJobs":{ + "name":"ListExportJobs", + "http":{ + "method":"POST", + "requestUri":"/v2/email/list-export-jobs" + }, + "input":{"shape":"ListExportJobsRequest"}, + "output":{"shape":"ListExportJobsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

    Lists all of the export jobs.

    " + }, "ListImportJobs":{ "name":"ListImportJobs", "http":{ @@ -1556,6 +1631,33 @@ }, "documentation":"

    Represents the body of the email message.

    " }, + "Bounce":{ + "type":"structure", + "members":{ + "BounceType":{ + "shape":"BounceType", + "documentation":"

    The type of the bounce, as determined by SES. Can be one of UNDETERMINED, TRANSIENT, or PERMANENT.

    " + }, + "BounceSubType":{ + "shape":"BounceSubType", + "documentation":"

    The subtype of the bounce, as determined by SES.

    " + }, + "DiagnosticCode":{ + "shape":"DiagnosticCode", + "documentation":"

    The status code issued by the reporting Message Transfer Authority (MTA). This field only appears if a delivery status notification (DSN) was attached to the bounce and the Diagnostic-Code was provided in the DSN.

    " + } + }, + "documentation":"

    Information about a Bounce event.

    " + }, + "BounceSubType":{"type":"string"}, + "BounceType":{ + "type":"string", + "enum":[ + "UNDETERMINED", + "TRANSIENT", + "PERMANENT" + ] + }, "BulkEmailContent":{ "type":"structure", "members":{ @@ -1632,6 +1734,25 @@ ] }, "CampaignId":{"type":"string"}, + "CancelExportJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The export job ID.

    ", + "location":"uri", + "locationName":"JobId" + } + }, + "documentation":"

    Represents a request to cancel an export job using the export job ID.

    " + }, + "CancelExportJobResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, "CaseId":{"type":"string"}, "Charset":{"type":"string"}, "CloudWatchDestination":{ @@ -1672,6 +1793,22 @@ "type":"list", "member":{"shape":"CloudWatchDimensionConfiguration"} }, + "Complaint":{ + "type":"structure", + "members":{ + "ComplaintSubType":{ + "shape":"ComplaintSubType", + "documentation":"

    Can either be null or OnAccountSuppressionList. If the value is OnAccountSuppressionList, SES accepted the message, but didn't attempt to send it because it was on the account-level suppression list.

    " + }, + "ComplaintFeedbackType":{ + "shape":"ComplaintFeedbackType", + "documentation":"

    The value of the Feedback-Type field from the feedback report received from the ISP.

    " + } + }, + "documentation":"

    Information about a Complaint event.

    " + }, + "ComplaintFeedbackType":{"type":"string"}, + "ComplaintSubType":{"type":"string"}, "ConcurrentModificationException":{ "type":"structure", "members":{ @@ -2133,6 +2270,34 @@ }, "documentation":"

    If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

    " }, + "CreateExportJobRequest":{ + "type":"structure", + "required":[ + "ExportDataSource", + "ExportDestination" + ], + "members":{ + "ExportDataSource":{ + "shape":"ExportDataSource", + "documentation":"

    The data source for the export job.

    " + }, + "ExportDestination":{ + "shape":"ExportDestination", + "documentation":"

    The destination for the export job.

    " + } + }, + "documentation":"

    Represents a request to create an export job from a data source to a data destination.

    " + }, + "CreateExportJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    A string that represents the export job ID.

    " + } + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, "CreateImportJobRequest":{ "type":"structure", "required":[ @@ -2240,7 +2405,7 @@ }, "DataFormat":{ "type":"string", - "documentation":"

    The data format of the import job's data source.

    ", + "documentation":"

    The data format of a file. It can be one of the following:

    • CSV – A comma-separated values file.

    • JSON – A JSON file.

    ", "enum":[ "CSV", "JSON" @@ -2568,6 +2733,18 @@ "type":"string", "documentation":"

    The subject line for an email that you submitted in a predictive inbox placement test.

    " }, + "DeliveryEventType":{ + "type":"string", + "documentation":"

    The type of delivery events:

    • SEND - The send request was successful and SES will attempt to deliver the message to the recipient’s mail server. (If account-level or global suppression is being used, SES will still count it as a send, but delivery is suppressed.)

    • DELIVERY - SES successfully delivered the email to the recipient's mail server. Excludes deliveries to the mailbox simulator and emails addressed to more than one recipient.

    • TRANSIENT_BOUNCE - Feedback received for delivery failures excluding issues with non-existent mailboxes. Excludes bounces from the mailbox simulator, and those from emails addressed to more than one recipient.

    • PERMANENT_BOUNCE - Feedback received for emails sent to non-existent mailboxes. Excludes bounces from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those from emails addressed to more than one recipient.

    • UNDETERMINED_BOUNCE - SES was unable to determine the bounce reason.

    • COMPLAINT - Complaint received for the email. This excludes complaints from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those from emails addressed to more than one recipient.

    ", + "enum":[ + "SEND", + "DELIVERY", + "TRANSIENT_BOUNCE", + "PERMANENT_BOUNCE", + "UNDETERMINED_BOUNCE", + "COMPLAINT" + ] + }, "DeliveryOptions":{ "type":"structure", "members":{ @@ -2601,6 +2778,7 @@ }, "documentation":"

    An object that describes the recipients for an email.

    Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492.

    " }, + "DiagnosticCode":{"type":"string"}, "DimensionName":{ "type":"string", "documentation":"

    The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:

    • It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " @@ -2826,6 +3004,11 @@ "member":{"shape":"DomainIspPlacement"} }, "EmailAddress":{"type":"string"}, + "EmailAddressFilterList":{ + "type":"list", + "member":{"shape":"InsightsEmailAddress"}, + "max":5 + }, "EmailAddressList":{ "type":"list", "member":{"shape":"EmailAddress"} @@ -2848,6 +3031,39 @@ }, "documentation":"

    An object that defines the entire content of the email, including the message headers and the body content. You can create a simple email message, in which you specify the subject and the text and HTML versions of the message body. You can also create raw messages, in which you specify a complete MIME-formatted message. Raw messages can include attachments and custom headers.

    " }, + "EmailInsights":{ + "type":"structure", + "members":{ + "Destination":{ + "shape":"InsightsEmailAddress", + "documentation":"

    The recipient of the email.

    " + }, + "Isp":{ + "shape":"Isp", + "documentation":"

    The recipient's ISP (e.g., Gmail, Yahoo, etc.).

    " + }, + "Events":{ + "shape":"InsightsEvents", + "documentation":"

    A list of events associated with the sent email.

    " + } + }, + "documentation":"

    An email's insights contain metadata and delivery information about a specific email.

    " + }, + "EmailInsightsList":{ + "type":"list", + "member":{"shape":"EmailInsights"} + }, + "EmailSubject":{ + "type":"string", + "max":998, + "min":1, + "sensitive":true + }, + "EmailSubjectFilterList":{ + "type":"list", + "member":{"shape":"EmailSubject"}, + "max":1 + }, "EmailTemplateContent":{ "type":"structure", "members":{ @@ -2909,6 +3125,14 @@ }, "Enabled":{"type":"boolean"}, "EnabledWrapper":{"type":"boolean"}, + "EngagementEventType":{ + "type":"string", + "documentation":"

    The type of engagement events:

    • OPEN - Open event for emails including open trackers. Excludes opens for emails addressed to more than one recipient.

    • CLICK - Click event for emails including wrapped links. Excludes clicks for emails addressed to more than one recipient.

    ", + "enum":[ + "OPEN", + "CLICK" + ] + }, "ErrorMessage":{"type":"string"}, "Esp":{"type":"string"}, "Esps":{ @@ -2991,6 +3215,20 @@ "type":"list", "member":{"shape":"EventDestination"} }, + "EventDetails":{ + "type":"structure", + "members":{ + "Bounce":{ + "shape":"Bounce", + "documentation":"

    Information about a Bounce event.

    " + }, + "Complaint":{ + "shape":"Complaint", + "documentation":"

    Information about a Complaint event.

    " + } + }, + "documentation":"

    Contains a Bounce object if the event type is BOUNCE. Contains a Complaint object if the event type is COMPLAINT.

    " + }, "EventType":{ "type":"string", "documentation":"

    An email sending event type. For example, email sends, opens, and bounces are all email events.

    ", @@ -3011,6 +3249,110 @@ "type":"list", "member":{"shape":"EventType"} }, + "ExportDataSource":{ + "type":"structure", + "members":{ + "MetricsDataSource":{"shape":"MetricsDataSource"}, + "MessageInsightsDataSource":{"shape":"MessageInsightsDataSource"} + }, + "documentation":"

    An object that contains details about the data source of the export job. It can only contain one of MetricsDataSource or MessageInsightsDataSource object.

    " + }, + "ExportDestination":{ + "type":"structure", + "required":["DataFormat"], + "members":{ + "DataFormat":{ + "shape":"DataFormat", + "documentation":"

    The data format of the final export job file. It can be one of the following:

    • CSV - A comma-separated values file.

    • JSON - A JSON file.

    " + }, + "S3Url":{ + "shape":"S3Url", + "documentation":"

    An Amazon S3 pre-signed URL that points to the generated export file.

    " + } + }, + "documentation":"

    An object that contains details about the destination of the export job.

    " + }, + "ExportDimensionValue":{ + "type":"list", + "member":{"shape":"MetricDimensionValue"}, + "max":10, + "min":1 + }, + "ExportDimensions":{ + "type":"map", + "key":{"shape":"MetricDimensionName"}, + "value":{"shape":"ExportDimensionValue"}, + "max":3, + "min":1 + }, + "ExportJobSummary":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The export job ID.

    " + }, + "ExportSourceType":{ + "shape":"ExportSourceType", + "documentation":"

    The source type of the export job.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    The status of the export job.

    " + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the export job was created.

    " + }, + "CompletedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the export job was completed.

    " + } + }, + "documentation":"

    A summary of the export job.

    " + }, + "ExportJobSummaryList":{ + "type":"list", + "member":{"shape":"ExportJobSummary"}, + "documentation":"

    A list of the export job summaries.

    " + }, + "ExportMetric":{ + "type":"structure", + "members":{ + "Name":{"shape":"Metric"}, + "Aggregation":{"shape":"MetricAggregation"} + }, + "documentation":"

    An object that contains a mapping between a Metric and MetricAggregation.

    " + }, + "ExportMetrics":{ + "type":"list", + "member":{"shape":"ExportMetric"}, + "max":10, + "min":1 + }, + "ExportSourceType":{ + "type":"string", + "documentation":"

    The type of data source of an export, can be one of the following:

    • METRICS_DATA - The metrics export.

    • MESSAGE_INSIGHTS - The Message Insights export.

    ", + "enum":[ + "METRICS_DATA", + "MESSAGE_INSIGHTS" + ] + }, + "ExportStatistics":{ + "type":"structure", + "members":{ + "ProcessedRecordsCount":{ + "shape":"ProcessedRecordsCount", + "documentation":"

    The number of records that were processed to generate the final export file.

    " + }, + "ExportedRecordsCount":{ + "shape":"ExportedRecordsCount", + "documentation":"

    The number of records that were exported to the final export file.

    This value might not be available for all export source types.

    " + } + }, + "documentation":"

    Statistics about the execution of an export job.

    " + }, + "ExportedRecordsCount":{"type":"integer"}, "FailedRecordsCount":{"type":"integer"}, "FailedRecordsS3Url":{"type":"string"}, "FailureInfo":{ @@ -3018,14 +3360,14 @@ "members":{ "FailedRecordsS3Url":{ "shape":"FailedRecordsS3Url", - "documentation":"

    An Amazon S3 presigned URL that contains all the failed records and related information.

    " + "documentation":"

    An Amazon S3 pre-signed URL that contains all the failed records and related information.

    " }, "ErrorMessage":{ "shape":"ErrorMessage", - "documentation":"

    A message about why the import job failed.

    " + "documentation":"

    A message about why the job failed.

    " } }, - "documentation":"

    An object that contains the failure details about an import job.

    " + "documentation":"

    An object that contains the failure details about a job.

    " }, "FailureRedirectionURL":{ "type":"string", @@ -3665,6 +4007,61 @@ }, "documentation":"

    The following element is returned by the service.

    " }, + "GetExportJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The export job ID.

    ", + "location":"uri", + "locationName":"JobId" + } + }, + "documentation":"

    Represents a request to retrieve information about an export job using the export job ID.

    " + }, + "GetExportJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"JobId", + "documentation":"

    The export job ID.

    " + }, + "ExportSourceType":{ + "shape":"ExportSourceType", + "documentation":"

    The type of source of the export job.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    The status of the export job.

    " + }, + "ExportDestination":{ + "shape":"ExportDestination", + "documentation":"

    The destination of the export job.

    " + }, + "ExportDataSource":{ + "shape":"ExportDataSource", + "documentation":"

    The data source of the export job.

    " + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the export job was created.

    " + }, + "CompletedTimestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of when the export job was completed.

    " + }, + "FailureInfo":{ + "shape":"FailureInfo", + "documentation":"

    The failure details about an export job.

    " + }, + "Statistics":{ + "shape":"ExportStatistics", + "documentation":"

    The statistics about the export job.

    " + } + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, "GetImportJobRequest":{ "type":"structure", "required":["JobId"], @@ -3720,6 +4117,45 @@ }, "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " }, + "GetMessageInsightsRequest":{ + "type":"structure", + "required":["MessageId"], + "members":{ + "MessageId":{ + "shape":"OutboundMessageId", + "documentation":"

    A MessageId is a unique identifier for a message, and is returned when sending emails through Amazon SES.

    ", + "location":"uri", + "locationName":"MessageId" + } + }, + "documentation":"

    A request to return information about a message.

    " + }, + "GetMessageInsightsResponse":{ + "type":"structure", + "members":{ + "MessageId":{ + "shape":"OutboundMessageId", + "documentation":"

    A unique identifier for the message.

    " + }, + "FromEmailAddress":{ + "shape":"InsightsEmailAddress", + "documentation":"

    The from address used to send the message.

    " + }, + "Subject":{ + "shape":"EmailSubject", + "documentation":"

    The subject line of the message.

    " + }, + "EmailTags":{ + "shape":"MessageTagList", + "documentation":"

    A list of tags, in the form of name/value pairs, that were applied to the email you sent, along with Amazon SES Auto-Tags.

    " + }, + "Insights":{ + "shape":"EmailInsightsList", + "documentation":"

    A set of insights associated with the message.

    " + } + }, + "documentation":"

    Information about a message.

    " + }, "GetSuppressedDestinationRequest":{ "type":"structure", "required":["EmailAddress"], @@ -3883,6 +4319,34 @@ }, "documentation":"

    An object that contains information about the inbox placement data settings for a verified domain that’s associated with your Amazon Web Services account. This data is available only if you enabled the Deliverability dashboard for the domain.

    " }, + "InsightsEmailAddress":{ + "type":"string", + "max":320, + "min":1, + "sensitive":true + }, + "InsightsEvent":{ + "type":"structure", + "members":{ + "Timestamp":{ + "shape":"Timestamp", + "documentation":"

    The timestamp of the event.

    " + }, + "Type":{ + "shape":"EventType", + "documentation":"

    The type of event:

    • SEND - The send request was successful and SES will attempt to deliver the message to the recipient’s mail server. (If account-level or global suppression is being used, SES will still count it as a send, but delivery is suppressed.)

    • DELIVERY - SES successfully delivered the email to the recipient's mail server. Excludes deliveries to the mailbox simulator, and those from emails addressed to more than one recipient.

    • BOUNCE - Feedback received for delivery failures. Additional details about the bounce are provided in the Details object. Excludes bounces from the mailbox simulator, and those from emails addressed to more than one recipient.

    • COMPLAINT - Complaint received for the email. Additional details about the complaint are provided in the Details object. This excludes complaints from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those from emails addressed to more than one recipient.

    • OPEN - Open event for emails including open trackers. Excludes opens for emails addressed to more than one recipient.

    • CLICK - Click event for emails including wrapped links. Excludes clicks for emails addressed to more than one recipient.

    " + }, + "Details":{ + "shape":"EventDetails", + "documentation":"

    Details about bounce or complaint events.

    " + } + }, + "documentation":"

    An object containing details about a specific event.

    " + }, + "InsightsEvents":{ + "type":"list", + "member":{"shape":"InsightsEvent"} + }, "InternalServiceErrorException":{ "type":"structure", "members":{ @@ -3908,6 +4372,12 @@ "type":"list", "member":{"shape":"Ip"} }, + "Isp":{"type":"string"}, + "IspFilterList":{ + "type":"list", + "member":{"shape":"Isp"}, + "max":5 + }, "IspName":{ "type":"string", "documentation":"

    The name of an email provider.

    " @@ -3936,17 +4406,18 @@ }, "JobId":{ "type":"string", - "documentation":"

    A string that represents the import job ID.

    ", + "documentation":"

    A string that represents a job ID.

    ", "min":1 }, "JobStatus":{ "type":"string", - "documentation":"

    The status of the import job.

    ", + "documentation":"

    The status of a job.

    • CREATED – Job has just been created.

    • PROCESSING – Job is processing.

    • ERROR – An error occurred during processing.

    • COMPLETED – Job has completed processing successfully.

    ", "enum":[ "CREATED", "PROCESSING", "COMPLETED", - "FAILED" + "FAILED", + "CANCELLED" ] }, "KinesisFirehoseDestination":{ @@ -3967,6 +4438,16 @@ }, "documentation":"

    An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.

    " }, + "LastDeliveryEventList":{ + "type":"list", + "member":{"shape":"DeliveryEventType"}, + "max":5 + }, + "LastEngagementEventList":{ + "type":"list", + "member":{"shape":"EngagementEventType"}, + "max":2 + }, "LastFreshStart":{ "type":"timestamp", "documentation":"

    The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.

    " @@ -4313,6 +4794,42 @@ }, "documentation":"

    The following elements are returned by the service.

    " }, + "ListExportJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token returned from a previous call to ListExportJobs to indicate the position in the list of export jobs.

    " + }, + "PageSize":{ + "shape":"MaxItems", + "documentation":"

    Maximum number of export jobs to return at once. Use this parameter to paginate results. If additional export jobs exist beyond the specified limit, the NextToken element is sent in the response. Use the NextToken value in subsequent calls to ListExportJobs to retrieve additional export jobs.

    " + }, + "ExportSourceType":{ + "shape":"ExportSourceType", + "documentation":"

    A value used to list export jobs that have a certain ExportSourceType.

    " + }, + "JobStatus":{ + "shape":"JobStatus", + "documentation":"

    A value used to list export jobs that have a certain JobStatus.

    " + } + }, + "documentation":"

    Represents a request to list all export jobs with filters.

    " + }, + "ListExportJobsResponse":{ + "type":"structure", + "members":{ + "ExportJobs":{ + "shape":"ExportJobSummaryList", + "documentation":"

    A list of the export job summaries.

    " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

    A string token indicating that there might be additional export jobs available to be listed. Use this token to a subsequent call to ListExportJobs with the same parameters to retrieve the next page of export jobs.

    " + } + }, + "documentation":"

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    " + }, "ListImportJobsRequest":{ "type":"structure", "members":{ @@ -4582,6 +5099,71 @@ "documentation":"

    The body of an email message.

    " }, "MessageData":{"type":"string"}, + "MessageInsightsDataSource":{ + "type":"structure", + "required":[ + "StartDate", + "EndDate" + ], + "members":{ + "StartDate":{ + "shape":"Timestamp", + "documentation":"

    Represents the start date for the export interval as a timestamp. The start date is inclusive.

    " + }, + "EndDate":{ + "shape":"Timestamp", + "documentation":"

    Represents the end date for the export interval as a timestamp. The end date is inclusive.

    " + }, + "Include":{ + "shape":"MessageInsightsFilters", + "documentation":"

    Filters for results to be included in the export file.

    " + }, + "Exclude":{ + "shape":"MessageInsightsFilters", + "documentation":"

    Filters for results to be excluded from the export file.

    " + }, + "MaxResults":{ + "shape":"MessageInsightsExportMaxResults", + "documentation":"

    The maximum number of results.

    " + } + }, + "documentation":"

    An object that contains filters applied when performing the Message Insights export.

    " + }, + "MessageInsightsExportMaxResults":{ + "type":"integer", + "max":10000, + "min":1 + }, + "MessageInsightsFilters":{ + "type":"structure", + "members":{ + "FromEmailAddress":{ + "shape":"EmailAddressFilterList", + "documentation":"

    The from address used to send the message.

    " + }, + "Destination":{ + "shape":"EmailAddressFilterList", + "documentation":"

    The recipient's email address.

    " + }, + "Subject":{ + "shape":"EmailSubjectFilterList", + "documentation":"

    The subject line of the message.

    " + }, + "Isp":{ + "shape":"IspFilterList", + "documentation":"

    The recipient's ISP (e.g., Gmail, Yahoo, etc.).

    " + }, + "LastDeliveryEvent":{ + "shape":"LastDeliveryEventList", + "documentation":"

    The last delivery-related event for the email, where the ordering is as follows: SEND < BOUNCE < DELIVERY < COMPLAINT.

    " + }, + "LastEngagementEvent":{ + "shape":"LastEngagementEventList", + "documentation":"

    The last engagement-related event for the email, where the ordering is as follows: OPEN < CLICK.

    Engagement events are only available if Engagement tracking is enabled.

    " + } + }, + "documentation":"

    An object containing Message Insights filters.

    If you specify multiple filters, the filters are joined by AND.

    If you specify multiple values for a filter, the values are joined by OR. Filter values are case-sensitive.

    FromEmailAddress, Destination, and Subject filters support partial match. A partial match is performed by using the * wildcard character placed at the beginning (suffix match), the end (prefix match) or both ends of the string (contains match). In order to match the literal characters * or \\, they must be escaped using the \\ character. If no wildcard character is present, an exact match is performed.

    " + }, "MessageRejected":{ "type":"structure", "members":{ @@ -4623,6 +5205,7 @@ }, "Metric":{ "type":"string", + "documentation":"

    The metric to export, can be one of the following:

    • SEND - Emails sent eligible for tracking in the VDM dashboard. This excludes emails sent to the mailbox simulator and emails addressed to more than one recipient.

    • COMPLAINT - Complaints received for your account. This excludes complaints from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those for emails addressed to more than one recipient.

    • PERMANENT_BOUNCE - Permanent bounces - i.e., feedback received for emails sent to non-existent mailboxes. Excludes bounces from the mailbox simulator, those originating from your account-level suppression list (if enabled), and those for emails addressed to more than one recipient.

    • TRANSIENT_BOUNCE - Transient bounces - i.e., feedback received for delivery failures excluding issues with non-existent mailboxes. Excludes bounces from the mailbox simulator, and those for emails addressed to more than one recipient.

    • OPEN - Unique open events for emails including open trackers. Excludes opens for emails addressed to more than one recipient.

    • CLICK - Unique click events for emails including wrapped links. Excludes clicks for emails addressed to more than one recipient.

    • DELIVERY - Successful deliveries for email sending attempts. Excludes deliveries to the mailbox simulator and for emails addressed to more than one recipient.

    • DELIVERY_OPEN - Successful deliveries for email sending attempts. Excludes deliveries to the mailbox simulator, for emails addressed to more than one recipient, and emails without open trackers.

    • DELIVERY_CLICK - Successful deliveries for email sending attempts. Excludes deliveries to the mailbox simulator, for emails addressed to more than one recipient, and emails without click trackers.

    • DELIVERY_COMPLAINT - Successful deliveries for email sending attempts. Excludes deliveries to the mailbox simulator, for emails addressed to more than one recipient, and emails addressed to recipients hosted by ISPs with which Amazon SES does not have a feedback loop agreement.

    ", "enum":[ "SEND", "COMPLAINT", @@ -4636,6 +5219,14 @@ "DELIVERY_COMPLAINT" ] }, + "MetricAggregation":{ + "type":"string", + "documentation":"

    The aggregation to apply to a metric, can be one of the following:

    • VOLUME - The volume of events for this metric.

    • RATE - The rate for this metric relative to the SEND metric volume.

    ", + "enum":[ + "RATE", + "VOLUME" + ] + }, "MetricDataError":{ "type":"structure", "members":{ @@ -4689,7 +5280,10 @@ "ISP" ] }, - "MetricDimensionValue":{"type":"string"}, + "MetricDimensionValue":{ + "type":"string", + "documentation":"

    A list of values associated with the MetricDimensionName to filter metrics by. Can either be * as a wildcard for all values or a list of up to 10 specific values. If one Dimension has the * value, other dimensions can only contain one value.

    " + }, "MetricNamespace":{ "type":"string", "enum":["VDM"] @@ -4698,6 +5292,39 @@ "type":"list", "member":{"shape":"Counter"} }, + "MetricsDataSource":{ + "type":"structure", + "required":[ + "Dimensions", + "Namespace", + "Metrics", + "StartDate", + "EndDate" + ], + "members":{ + "Dimensions":{ + "shape":"ExportDimensions", + "documentation":"

    An object that contains a mapping between a MetricDimensionName and MetricDimensionValue to filter metrics by. Must contain at least 1 dimension but no more than 3 unique ones.

    " + }, + "Namespace":{ + "shape":"MetricNamespace", + "documentation":"

    The metrics namespace - e.g., VDM.

    " + }, + "Metrics":{ + "shape":"ExportMetrics", + "documentation":"

    A list of ExportMetric objects to export.

    " + }, + "StartDate":{ + "shape":"Timestamp", + "documentation":"

    Represents the start date for the export interval as a timestamp.

    " + }, + "EndDate":{ + "shape":"Timestamp", + "documentation":"

    Represents the end date for the export interval as a timestamp.

    " + } + }, + "documentation":"

    An object that contains details about the data source for the metrics export.

    " + }, "NextToken":{"type":"string"}, "NotFoundException":{ "type":"structure", @@ -5465,7 +6092,7 @@ }, "S3Url":{ "type":"string", - "documentation":"

    An Amazon S3 URL in the format s3://<bucket_name>/<object>.

    ", + "documentation":"

    An Amazon S3 URL in the format s3://<bucket_name>/<object> or a pre-signed URL.

    ", "pattern":"^s3:\\/\\/([^\\/]+)\\/(.*?([^\\/]+)\\/?)$" }, "ScalingMode":{ diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index 9d33659b91d..661a433e91c 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/shield/pom.xml b/services/shield/pom.xml index b7d677ac95a..18b587880d2 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/shield/src/main/resources/codegen-resources/customization.config b/services/shield/src/main/resources/codegen-resources/customization.config index 3beb25f9846..541a64eac33 100644 --- a/services/shield/src/main/resources/codegen-resources/customization.config +++ b/services/shield/src/main/resources/codegen-resources/customization.config @@ -8,7 +8,7 @@ "listAttacks", "getSubscriptionState" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "updateEmergencyContactSettings", "updateSubscription", "describeDRTAccess", diff --git a/services/signer/pom.xml b/services/signer/pom.xml index 430b649e3d4..12fed285e48 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/simspaceweaver/pom.xml b/services/simspaceweaver/pom.xml index 9c1f25d696d..41b214c6882 100644 --- a/services/simspaceweaver/pom.xml +++ b/services/simspaceweaver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT simspaceweaver AWS Java SDK :: Services :: Sim Space Weaver diff --git a/services/sms/pom.xml b/services/sms/pom.xml index 5ad2bc6b567..1744ccbeb0b 100644 --- a/services/sms/pom.xml +++ 
b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index e0d471a344f..07ef5c07756 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/snowball/src/main/resources/codegen-resources/customization.config b/services/snowball/src/main/resources/codegen-resources/customization.config index d6daf2b5dc0..50142bb8bc8 100644 --- a/services/snowball/src/main/resources/codegen-resources/customization.config +++ b/services/snowball/src/main/resources/codegen-resources/customization.config @@ -17,7 +17,7 @@ ] } }, - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "createJob" ] } diff --git a/services/snowdevicemanagement/pom.xml b/services/snowdevicemanagement/pom.xml index ddb11fe120d..7333d414286 100644 --- a/services/snowdevicemanagement/pom.xml +++ b/services/snowdevicemanagement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT snowdevicemanagement AWS Java SDK :: Services :: Snow Device Management diff --git a/services/sns/pom.xml b/services/sns/pom.xml index 747ff7e72f9..1d35495072f 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index 421c160708d..d813d183c6d 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index 7fdc973c098..400ae110211 100644 --- 
a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssm/src/main/resources/codegen-resources/customization.config b/services/ssm/src/main/resources/codegen-resources/customization.config index 6d66b0458a9..0f8e54aef31 100644 --- a/services/ssm/src/main/resources/codegen-resources/customization.config +++ b/services/ssm/src/main/resources/codegen-resources/customization.config @@ -20,7 +20,7 @@ "listResourceComplianceSummaries", "listResourceDataSync" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deleteAssociation", "describeAssociation", "listComplianceItems", diff --git a/services/ssmcontacts/pom.xml b/services/ssmcontacts/pom.xml index 08b55671009..ec1750778de 100644 --- a/services/ssmcontacts/pom.xml +++ b/services/ssmcontacts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ssmcontacts AWS Java SDK :: Services :: SSM Contacts diff --git a/services/ssmincidents/pom.xml b/services/ssmincidents/pom.xml index c882863dda2..2e3d56ae34a 100644 --- a/services/ssmincidents/pom.xml +++ b/services/ssmincidents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ssmincidents AWS Java SDK :: Services :: SSM Incidents diff --git a/services/ssmsap/pom.xml b/services/ssmsap/pom.xml index 217ba5160c8..d2c09d71889 100644 --- a/services/ssmsap/pom.xml +++ b/services/ssmsap/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ssmsap AWS Java SDK :: Services :: Ssm Sap diff --git a/services/sso/pom.xml b/services/sso/pom.xml index 2d17058e814..00ace679024 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sso AWS Java SDK :: Services :: SSO diff --git 
a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index a105038abf5..e35f28491a3 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index 8b0e2a424e4..0426ac40e1a 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index 1637090db50..d875b39e0cc 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/sts/pom.xml b/services/sts/pom.xml index d3033d5f4ef..fb2e7538e53 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/support/pom.xml b/services/support/pom.xml index cc15ff17b2a..20a8bc97144 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/support/src/main/resources/codegen-resources/customization.config b/services/support/src/main/resources/codegen-resources/customization.config index 6c7177e16e0..62198e1a488 100644 --- a/services/support/src/main/resources/codegen-resources/customization.config +++ b/services/support/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "resolveCase", 
"describeSeverityLevels", "describeCases", diff --git a/services/supportapp/pom.xml b/services/supportapp/pom.xml index b44ea2751b9..8b01b3f2dae 100644 --- a/services/supportapp/pom.xml +++ b/services/supportapp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT supportapp AWS Java SDK :: Services :: Support App diff --git a/services/swf/pom.xml b/services/swf/pom.xml index 638d405a711..f7415a1ddda 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index e9655bd603d..72f251f8959 100644 --- a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/textract/pom.xml b/services/textract/pom.xml index 9b344e3ead8..0c9b9ae82c5 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index 4f5393b4727..afc21df1573 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 0f2f8db29e9..0faadf17d53 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/tnb/pom.xml 
b/services/tnb/pom.xml index f46cae20bdd..c13c5010b5f 100644 --- a/services/tnb/pom.xml +++ b/services/tnb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT tnb AWS Java SDK :: Services :: Tnb diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 0386e1badf5..9b16a93f927 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index c2713f0a019..084a4b043f2 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index f3e40f88268..94b481e73dd 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/transfer/src/main/resources/codegen-resources/service-2.json b/services/transfer/src/main/resources/codegen-resources/service-2.json index 673f1450cbd..9deccf29a34 100644 --- a/services/transfer/src/main/resources/codegen-resources/service-2.json +++ b/services/transfer/src/main/resources/codegen-resources/service-2.json @@ -4177,7 +4177,7 @@ "members":{ "UserSecretId":{ "shape":"SecretId", - "documentation":"

    The identifiers for the secrets (in Amazon Web Services Secrets Manager) that contain the SFTP user's private keys or passwords.

    " + "documentation":"

    The identifier for the secret (in Amazon Web Services Secrets Manager) that contains the SFTP user's private key, password, or both. The identifier can be either the Amazon Resource Name (ARN) or the name of the secret.

    " }, "TrustedHostKeys":{ "shape":"SftpConnectorTrustedHostKeyList", @@ -4266,7 +4266,7 @@ }, "SendFilePaths":{ "shape":"FilePaths", - "documentation":"

    One or more source paths for the Transfer Family server. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt .

    Replace DOC-EXAMPLE-BUCKET with one of your actual buckets.

    " + "documentation":"

    One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt .

    Replace DOC-EXAMPLE-BUCKET with one of your actual buckets.

    " }, "RetrieveFilePaths":{ "shape":"FilePaths", diff --git a/services/translate/pom.xml b/services/translate/pom.xml index 60377c51d99..0828834c7a9 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 translate diff --git a/services/verifiedpermissions/pom.xml b/services/verifiedpermissions/pom.xml index 77f4b11c713..01266b2899a 100644 --- a/services/verifiedpermissions/pom.xml +++ b/services/verifiedpermissions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT verifiedpermissions AWS Java SDK :: Services :: Verified Permissions diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json index 1ee1c6c8398..3a5b73a9e4e 100644 --- a/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,55 +1,55 @@ { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions-fips.us-gov-east-1.api.aws" + "url": "https://verifiedpermissions-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions-fips.us-gov-east-1.amazonaws.com" + "url": "https://verifiedpermissions-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", 
"UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions.us-gov-east-1.api.aws" + "url": "https://verifiedpermissions.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions.us-gov-east-1.amazonaws.com" + "url": "https://verifiedpermissions.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -99,109 +99,109 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": 
"https://verifiedpermissions-fips.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://verifiedpermissions-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://verifiedpermissions.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions.us-iso-east-1.c2s.ic.gov" + "url": "https://verifiedpermissions.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://verifiedpermissions-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition 
does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions-fips.us-east-1.amazonaws.com" + "url": "https://verifiedpermissions-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://verifiedpermissions.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://verifiedpermissions.us-east-1.amazonaws.com" + "url": "https://verifiedpermissions.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -210,9 +210,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -223,9 +223,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + 
"UseDualStack": false } }, { @@ -234,9 +234,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -247,13 +247,27 @@ } }, "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-isob-east-1" + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" @@ -262,7 +276,6 @@ "params": { "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -272,9 +285,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -284,11 +297,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json b/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json index 04d8ee188f8..d0e3f80e7eb 100644 --- 
a/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/service-2.json @@ -29,7 +29,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).

    After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies.

    If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

    To reference a user from this identity source in your Cedar policies, use the following syntax.

    IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId>

    Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool.

    ", + "documentation":"

    Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).

    After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies.

    If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

    To reference a user from this identity source in your Cedar policies, use the following syntax.

    IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId>

    Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool.

    Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "CreatePolicy":{ @@ -48,7 +48,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates a Cedar policy and saves it in the specified policy store. You can create either a static policy or a policy linked to a policy template.

    • To create a static policy, provide the Cedar policy text in the StaticPolicy section of the PolicyDefinition.

    • To create a policy that is dynamically linked to a policy template, specify the policy template ID and the principal and resource to associate with this policy in the templateLinked section of the PolicyDefinition. If the policy template is ever updated, any policies linked to the policy template automatically use the updated template.

    Creating a policy causes it to be validated against the schema in the policy store. If the policy doesn't pass validation, the operation fails and the policy isn't stored.

    ", + "documentation":"

    Creates a Cedar policy and saves it in the specified policy store. You can create either a static policy or a policy linked to a policy template.

    • To create a static policy, provide the Cedar policy text in the StaticPolicy section of the PolicyDefinition.

    • To create a policy that is dynamically linked to a policy template, specify the policy template ID and the principal and resource to associate with this policy in the templateLinked section of the PolicyDefinition. If the policy template is ever updated, any policies linked to the policy template automatically use the updated template.

    Creating a policy causes it to be validated against the schema in the policy store. If the policy doesn't pass validation, the operation fails and the policy isn't stored.

    Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "CreatePolicyStore":{ @@ -66,7 +66,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates a policy store. A policy store is a container for policy resources.

    Although Cedar supports multiple namespaces, Verified Permissions currently supports only one namespace per policy store.

    ", + "documentation":"

    Creates a policy store. A policy store is a container for policy resources.

    Although Cedar supports multiple namespaces, Verified Permissions currently supports only one namespace per policy store.

    Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "CreatePolicyTemplate":{ @@ -85,7 +85,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates a policy template. A template can use placeholders for the principal and resource. A template must be instantiated into a policy by associating it with specific principals and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any policies that are linked to that template are immediately updated as well.

    ", + "documentation":"

    Creates a policy template. A template can use placeholders for the principal and resource. A template must be instantiated into a policy by associating it with specific principals and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any policies that are linked to that template are immediately updated as well.

    Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "DeleteIdentitySource":{ @@ -279,7 +279,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source. The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

    If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

    " + "documentation":"

    Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

    If you specify the identityToken parameter, then this operation derives the principal from that token. You must not also include that principal in the entities parameter or the operation fails and reports a conflict between the two entity sources.

    If you provide only an accessToken, then you can include the entity as part of the entities parameter to provide additional attributes.

    At this time, Verified Permissions accepts tokens from only Amazon Cognito.

    Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature.

    If you delete an Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

    " }, "ListIdentitySources":{ "name":"ListIdentitySources", @@ -365,7 +365,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Creates or updates the policy schema in the specified policy store. The schema is used to validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you later update a policy, then it is evaluated against the new schema at that time.

    ", + "documentation":"

    Creates or updates the policy schema in the specified policy store. The schema is used to validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you later update a policy, then it is evaluated against the new schema at that time.

    Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "UpdateIdentitySource":{ @@ -384,7 +384,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type.

    ", + "documentation":"

    Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type.

    Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "UpdatePolicy":{ @@ -404,7 +404,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using UpdatePolicyTemplate.

    If policy validation is enabled in the policy store, then updating a static policy causes Verified Permissions to validate the policy against the schema in the policy store. If the updated static policy doesn't pass validation, the operation fails and the update isn't stored.

    ", + "documentation":"

    Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using UpdatePolicyTemplate.

    • If policy validation is enabled in the policy store, then updating a static policy causes Verified Permissions to validate the policy against the schema in the policy store. If the updated static policy doesn't pass validation, the operation fails and the update isn't stored.

    • When you edit a static policy, you can change only certain elements of a static policy:

      • The action referenced by the policy.

      • A condition clause, such as when and unless.

      You can't change these elements of a static policy:

      • Changing a policy from a static policy to a template-linked policy.

      • Changing the effect of a static policy between permit and forbid.

      • The principal referenced by a static policy.

      • The resource referenced by a static policy.

    • To update a template-linked policy, you must update the template instead.

    Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "UpdatePolicyStore":{ @@ -423,7 +423,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Modifies the validation setting for a policy store.

    ", + "documentation":"

    Modifies the validation setting for a policy store.

    Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

    ", "idempotent":true }, "UpdatePolicyTemplate":{ @@ -442,7 +442,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Updates the specified policy template. You can update only the description and the some elements of the policyBody.

    Changes you make to the policy template content are immediately reflected in authorization decisions that involve all template-linked policies instantiated from this template.

    ", + "documentation":"

    Updates the specified policy template. You can update only the description and some elements of the policyBody.

    Changes you make to the policy template content are immediately reflected in authorization decisions that involve all template-linked policies instantiated from this template.

    Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

    ", "idempotent":true } }, @@ -460,7 +460,8 @@ "type":"string", "max":200, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "ActionIdentifier":{ "type":"structure", @@ -484,7 +485,8 @@ "type":"string", "max":200, "min":1, - "pattern":"Action$|^.+::Action" + "pattern":"Action$|^.+::Action", + "sensitive":true }, "AttributeValue":{ "type":"structure", @@ -523,13 +525,15 @@ }, "BooleanAttribute":{ "type":"boolean", - "box":true + "box":true, + "sensitive":true }, "ClientId":{ "type":"string", "max":255, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "ClientIds":{ "type":"list", @@ -593,8 +597,7 @@ "ContextMap":{ "type":"map", "key":{"shape":"String"}, - "value":{"shape":"AttributeValue"}, - "min":0 + "value":{"shape":"AttributeValue"} }, "CreateIdentitySourceInput":{ "type":"structure", @@ -929,14 +932,14 @@ "EntityAttributes":{ "type":"map", "key":{"shape":"String"}, - "value":{"shape":"AttributeValue"}, - "min":0 + "value":{"shape":"AttributeValue"} }, "EntityId":{ "type":"string", "max":200, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "EntityIdentifier":{ "type":"structure", @@ -973,12 +976,11 @@ "documentation":"

    The parents in the hierarchy that contains the entity.

    " } }, - "documentation":"

    Contains information about an entity that can be referenced in a Cedar policy.

    This data type is used as one of the fields in the EntitiesDefinition structure.

    { \"id\": { \"entityType\": \"Photo\", \"entityId\": \"VacationPhoto94.jpg\" }, \"Attributes\": {}, \"Parents\": [ { \"entityType\": \"Album\", \"entityId\": \"alice_folder\" } ] }

    " + "documentation":"

    Contains information about an entity that can be referenced in a Cedar policy.

    This data type is used as one of the fields in the EntitiesDefinition structure.

    { \"identifier\": { \"entityType\": \"Photo\", \"entityId\": \"VacationPhoto94.jpg\" }, \"attributes\": {}, \"parents\": [ { \"entityType\": \"Album\", \"entityId\": \"alice_folder\" } ] }

    " }, "EntityList":{ "type":"list", - "member":{"shape":"EntityItem"}, - "min":0 + "member":{"shape":"EntityItem"} }, "EntityReference":{ "type":"structure", @@ -999,7 +1001,8 @@ "type":"string", "max":200, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "EvaluationErrorItem":{ "type":"structure", @@ -1010,7 +1013,8 @@ "documentation":"

    The error description.

    " } }, - "documentation":"

    Contains a description of an evaluation error.

    This data type is used as a request parameter in the IsAuthorized and IsAuthorizedWithToken operations.

    " + "documentation":"

    Contains a description of an evaluation error.

    This data type is used as a request parameter in the IsAuthorized and IsAuthorizedWithToken operations.

    ", + "sensitive":true }, "EvaluationErrorList":{ "type":"list", @@ -1451,11 +1455,11 @@ }, "identityToken":{ "shape":"Token", - "documentation":"

    Specifies an identity token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, but not both.

    " + "documentation":"

    Specifies an identity token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, or both.

    " }, "accessToken":{ "shape":"Token", - "documentation":"

    Specifies an access token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken or an IdentityToken, but not both.

    " + "documentation":"

    Specifies an access token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an AccessToken, or an IdentityToken, or both.

    " }, "action":{ "shape":"ActionIdentifier", @@ -1471,7 +1475,7 @@ }, "entities":{ "shape":"EntitiesDefinition", - "documentation":"

    Specifies the list of resources and principals and their associated attributes that Verified Permissions can examine when evaluating the policies.

    You can include only principal and resource entities in this parameter; you can't include actions. You must specify actions in the schema.

    " + "documentation":"

    Specifies the list of resources and their associated attributes that Verified Permissions can examine when evaluating the policies.

    You can include only resource and action entities in this parameter; you can't include principals.

    • The IsAuthorizedWithToken operation takes principal attributes from only the identityToken or accessToken passed to the operation.

    • For action entities, you can include only their Identifier and EntityType.

    " } } }, @@ -1511,7 +1515,7 @@ }, "maxResults":{ "shape":"ListIdentitySourcesMaxResults", - "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + "documentation":"

    Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    If you do not specify this parameter, the operation defaults to 10 identity sources per response. You can specify a maximum of 200 identity sources per response.

    " }, "filters":{ "shape":"IdentitySourceFilters", @@ -1553,7 +1557,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + "documentation":"

    Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    If you do not specify this parameter, the operation defaults to 10 policies per response. You can specify a maximum of 50 policies per response.

    " }, "filter":{ "shape":"PolicyFilter", @@ -1584,7 +1588,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + "documentation":"

    Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    If you do not specify this parameter, the operation defaults to 10 policy stores per response. You can specify a maximum of 50 policy stores per response.

    " } } }, @@ -1616,7 +1620,7 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    Specifies the total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    " + "documentation":"

    Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.

    If you do not specify this parameter, the operation defaults to 10 policy templates per response. You can specify a maximum of 50 policy templates per response.

    " } } }, @@ -1636,19 +1640,21 @@ }, "LongAttribute":{ "type":"long", - "box":true + "box":true, + "sensitive":true }, "MaxResults":{ "type":"integer", "box":true, - "max":20, + "max":50, "min":1 }, "Namespace":{ "type":"string", "max":100, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "NamespaceList":{ "type":"list", @@ -1796,7 +1802,8 @@ "PolicyStatement":{ "type":"string", "max":10000, - "min":1 + "min":1, + "sensitive":true }, "PolicyStoreId":{ "type":"string", @@ -1834,7 +1841,8 @@ "PolicyTemplateDescription":{ "type":"string", "max":150, - "min":0 + "min":0, + "sensitive":true }, "PolicyTemplateId":{ "type":"string", @@ -1889,7 +1897,8 @@ "type":"string", "max":200, "min":1, - "pattern":".*" + "pattern":".*", + "sensitive":true }, "PutSchemaInput":{ "type":"structure", @@ -2013,7 +2022,8 @@ "SchemaJson":{ "type":"string", "max":10000, - "min":1 + "min":1, + "sensitive":true }, "ServiceQuotaExceededException":{ "type":"structure", @@ -2090,10 +2100,14 @@ "StaticPolicyDescription":{ "type":"string", "max":150, - "min":0 + "min":0, + "sensitive":true }, "String":{"type":"string"}, - "StringAttribute":{"type":"string"}, + "StringAttribute":{ + "type":"string", + "sensitive":true + }, "TemplateLinkedPolicyDefinition":{ "type":"structure", "required":["policyTemplateId"], @@ -2177,7 +2191,8 @@ "type":"string", "max":131072, "min":1, - "pattern":"[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+" + "pattern":"[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+.[A-Za-z0-9-_=]+", + "sensitive":true }, "UpdateCognitoUserPoolConfiguration":{ "type":"structure", diff --git a/services/voiceid/pom.xml b/services/voiceid/pom.xml index 630ab16447f..d014d88f39e 100644 --- a/services/voiceid/pom.xml +++ b/services/voiceid/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT voiceid AWS Java SDK :: Services :: Voice ID diff --git a/services/vpclattice/pom.xml b/services/vpclattice/pom.xml index 83cae67e285..eb29597a9a7 100644 --- 
a/services/vpclattice/pom.xml +++ b/services/vpclattice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT vpclattice AWS Java SDK :: Services :: VPC Lattice diff --git a/services/waf/pom.xml b/services/waf/pom.xml index f3c754d97cf..cf4b57b5dc1 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/waf/src/main/resources/codegen-resources/waf/customization.config b/services/waf/src/main/resources/codegen-resources/waf/customization.config index 8da5cd6c947..23503733d43 100644 --- a/services/waf/src/main/resources/codegen-resources/waf/customization.config +++ b/services/waf/src/main/resources/codegen-resources/waf/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listActivatedRulesInRuleGroup", "listLoggingConfigurations" ], diff --git a/services/waf/src/main/resources/codegen-resources/wafregional/customization.config b/services/waf/src/main/resources/codegen-resources/wafregional/customization.config index c2cd16ad4d6..e8d7cb99166 100644 --- a/services/waf/src/main/resources/codegen-resources/wafregional/customization.config +++ b/services/waf/src/main/resources/codegen-resources/wafregional/customization.config @@ -6,7 +6,7 @@ }, "sdkRequestBaseClassName": "WafRequest", "sdkResponseBaseClassName": "WafResponse", - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "listActivatedRulesInRuleGroup", "listLoggingConfigurations" ], diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index 6c5e75e39cc..72af1a1b60c 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT wafv2 AWS Java SDK :: Services :: WAFV2 diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index 
b09e5fe991d..66a0a613ed1 100644 --- a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/wisdom/pom.xml b/services/wisdom/pom.xml index efc100fde13..5c88190f8ce 100644 --- a/services/wisdom/pom.xml +++ b/services/wisdom/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT wisdom AWS Java SDK :: Services :: Wisdom diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index 62e4c30b262..be49ce7538e 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/workdocs/src/main/resources/codegen-resources/customization.config b/services/workdocs/src/main/resources/codegen-resources/customization.config index 59ace42147e..304bdf6f931 100644 --- a/services/workdocs/src/main/resources/codegen-resources/customization.config +++ b/services/workdocs/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,5 @@ { - "blacklistedSimpleMethods" : [ + "excludedSimpleMethods" : [ "describeUsers", "describeActivities", "getResources" diff --git a/services/worklink/pom.xml b/services/worklink/pom.xml index f319f11cca7..72f87cbfd88 100644 --- a/services/worklink/pom.xml +++ b/services/worklink/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT worklink AWS Java SDK :: Services :: WorkLink diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index fc5461e39b7..4f3d66993a9 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 workmail diff --git a/services/workmailmessageflow/pom.xml 
b/services/workmailmessageflow/pom.xml index 0f70544170d..d527d4fdf88 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index 8e75ee17f63..66b4f3d7ae9 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/workspaces/src/main/resources/codegen-resources/customization.config b/services/workspaces/src/main/resources/codegen-resources/customization.config index 8ed4e567f57..ab497503791 100644 --- a/services/workspaces/src/main/resources/codegen-resources/customization.config +++ b/services/workspaces/src/main/resources/codegen-resources/customization.config @@ -7,7 +7,7 @@ "describeWorkspaces", "describeWorkspacesConnectionStatus" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "describeAccountModifications", "describeAccount" ] diff --git a/services/workspacesweb/pom.xml b/services/workspacesweb/pom.xml index 3f2c3fa83c7..8da4410e533 100644 --- a/services/workspacesweb/pom.xml +++ b/services/workspacesweb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT workspacesweb AWS Java SDK :: Services :: Work Spaces Web diff --git a/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json index 1552c84bcb8..7124ded3a94 100644 --- a/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/workspacesweb/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -58,52 +58,56 @@ "type": "error" }, { - 
"conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - 
"supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://workspaces-web-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + 
"conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://workspaces-web.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://workspaces-web.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://workspaces-web.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://workspaces-web.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git 
a/services/workspacesweb/src/main/resources/codegen-resources/service-2.json b/services/workspacesweb/src/main/resources/codegen-resources/service-2.json index 1efd59c263c..3c4785fbd2e 100644 --- a/services/workspacesweb/src/main/resources/codegen-resources/service-2.json +++ b/services/workspacesweb/src/main/resources/codegen-resources/service-2.json @@ -1380,6 +1380,7 @@ }, "BrowserSettingsSummary":{ "type":"structure", + "required":["browserSettingsArn"], "members":{ "browserSettingsArn":{ "shape":"ARN", @@ -1498,6 +1499,64 @@ }, "exception":true }, + "CookieDomain":{ + "type":"string", + "max":253, + "min":0, + "pattern":"^(\\.?)(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)*[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$" + }, + "CookieName":{ + "type":"string", + "max":4096, + "min":0 + }, + "CookiePath":{ + "type":"string", + "max":2000, + "min":0, + "pattern":"^/(\\S)*$" + }, + "CookieSpecification":{ + "type":"structure", + "required":["domain"], + "members":{ + "domain":{ + "shape":"CookieDomain", + "documentation":"

    The domain of the cookie.

    " + }, + "name":{ + "shape":"CookieName", + "documentation":"

    The name of the cookie.

    " + }, + "path":{ + "shape":"CookiePath", + "documentation":"

    The path of the cookie.

    " + } + }, + "documentation":"

    Specifies a single cookie or set of cookies in an end user's browser.

    " + }, + "CookieSpecifications":{ + "type":"list", + "member":{"shape":"CookieSpecification"}, + "max":10, + "min":0 + }, + "CookieSynchronizationConfiguration":{ + "type":"structure", + "required":["allowlist"], + "members":{ + "allowlist":{ + "shape":"CookieSpecifications", + "documentation":"

    The list of cookie specifications that are allowed to be synchronized to the remote browser.

    " + }, + "blocklist":{ + "shape":"CookieSpecifications", + "documentation":"

    The list of cookie specifications that are blocked from being synchronized to the remote browser.

    " + } + }, + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    ", + "sensitive":true + }, "CreateBrowserSettingsRequest":{ "type":"structure", "required":["browserPolicy"], @@ -1572,7 +1631,7 @@ "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    " } } @@ -1778,15 +1837,27 @@ "uploadAllowed" ], "members":{ + "additionalEncryptionContext":{ + "shape":"EncryptionContextMap", + "documentation":"

    The additional encryption context of the user settings.

    " + }, "clientToken":{ "shape":"ClientToken", "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request.

    If you do not specify a client token, one is automatically generated by the AWS SDK.

    ", "idempotencyToken":true }, + "cookieSynchronizationConfiguration":{ + "shape":"CookieSynchronizationConfiguration", + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    " + }, "copyAllowed":{ "shape":"EnabledType", "documentation":"

    Specifies whether the user can copy text from the streaming session to the local device.

    " }, + "customerManagedKey":{ + "shape":"keyArn", + "documentation":"

    The customer managed key used to encrypt sensitive information in the user settings.

    " + }, "disconnectTimeoutInMinutes":{ "shape":"DisconnectTimeoutInMinutes", "documentation":"

    The amount of time that a streaming session remains active after users disconnect.

    " @@ -1849,7 +1920,7 @@ "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    ", "location":"uri", "locationName":"identityProviderArn" @@ -2125,7 +2196,7 @@ "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    ", "location":"uri", "locationName":"identityProviderArn" @@ -2253,6 +2324,7 @@ }, "GetTrustStoreCertificateResponse":{ "type":"structure", + "required":["trustStoreArn"], "members":{ "certificate":{ "shape":"Certificate", @@ -2332,7 +2404,7 @@ "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    " }, "identityProviderDetails":{ @@ -2369,9 +2441,10 @@ }, "IdentityProviderSummary":{ "type":"structure", + "required":["identityProviderArn"], "members":{ "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    " }, "identityProviderName":{ @@ -2455,6 +2528,7 @@ }, "IpAccessSettingsSummary":{ "type":"structure", + "required":["ipAccessSettingsArn"], "members":{ "creationDate":{ "shape":"Timestamp", @@ -2714,6 +2788,7 @@ }, "ListTrustStoreCertificatesResponse":{ "type":"structure", + "required":["trustStoreArn"], "members":{ "certificateList":{ "shape":"CertificateSummaryList", @@ -2857,6 +2932,7 @@ }, "NetworkSettingsSummary":{ "type":"structure", + "required":["networkSettingsArn"], "members":{ "networkSettingsArn":{ "shape":"ARN", @@ -2877,6 +2953,7 @@ }, "Portal":{ "type":"structure", + "required":["portalArn"], "members":{ "authenticationType":{ "shape":"AuthenticationType", @@ -2961,6 +3038,7 @@ }, "PortalSummary":{ "type":"structure", + "required":["portalArn"], "members":{ "authenticationType":{ "shape":"AuthenticationType", @@ -3120,6 +3198,12 @@ "max":3, "min":2 }, + "SubresourceARN":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:[\\w+=\\/,.@-]+:[a-zA-Z0-9\\-]+:[a-zA-Z0-9\\-]*:[a-zA-Z0-9]{1,12}:[a-zA-Z]+(\\/[a-fA-F0-9\\-]{36}){2,}$" + }, "Tag":{ "type":"structure", "required":[ @@ -3240,6 +3324,7 @@ }, "TrustStore":{ "type":"structure", + "required":["trustStoreArn"], "members":{ "associatedPortalArns":{ "shape":"ArnList", @@ -3333,7 +3418,7 @@ "idempotencyToken":true }, "identityProviderArn":{ - "shape":"ARN", + "shape":"SubresourceARN", "documentation":"

    The ARN of the identity provider.

    ", "location":"uri", "locationName":"identityProviderArn" @@ -3544,6 +3629,10 @@ "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

    If you do not specify a client token, one is automatically generated by the AWS SDK.

    ", "idempotencyToken":true }, + "cookieSynchronizationConfiguration":{ + "shape":"CookieSynchronizationConfiguration", + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    If the allowlist and blocklist are empty, the configuration becomes null.

    " + }, "copyAllowed":{ "shape":"EnabledType", "documentation":"

    Specifies whether the user can copy text from the streaming session to the local device.

    " @@ -3615,6 +3704,7 @@ }, "UserAccessLoggingSettingsSummary":{ "type":"structure", + "required":["userAccessLoggingSettingsArn"], "members":{ "kinesisStreamArn":{ "shape":"KinesisStreamArn", @@ -3635,6 +3725,10 @@ "shape":"ArnList", "documentation":"

    A list of web portal ARNs that this user settings is associated with.

    " }, + "cookieSynchronizationConfiguration":{ + "shape":"CookieSynchronizationConfiguration", + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    " + }, "copyAllowed":{ "shape":"EnabledType", "documentation":"

    Specifies whether the user can copy text from the streaming session to the local device.

    " @@ -3676,7 +3770,12 @@ }, "UserSettingsSummary":{ "type":"structure", + "required":["userSettingsArn"], "members":{ + "cookieSynchronizationConfiguration":{ + "shape":"CookieSynchronizationConfiguration", + "documentation":"

    The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

    " + }, "copyAllowed":{ "shape":"EnabledType", "documentation":"

    Specifies whether the user can copy text from the streaming session to the local device.

    " diff --git a/services/xray/pom.xml b/services/xray/pom.xml index ae9252a6bab..4d16215260d 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/services/xray/src/main/resources/codegen-resources/customization.config b/services/xray/src/main/resources/codegen-resources/customization.config index 0d2f3c58141..323772188ef 100644 --- a/services/xray/src/main/resources/codegen-resources/customization.config +++ b/services/xray/src/main/resources/codegen-resources/customization.config @@ -5,7 +5,7 @@ "getSamplingRules", "getSamplingStatisticSummaries" ], - "blacklistedSimpleMethods": [ + "excludedSimpleMethods": [ "deleteSamplingRule", "getGroup" ], diff --git a/test/auth-tests/pom.xml b/test/auth-tests/pom.xml index 68e47c72ae9..4ed3af17dc9 100644 --- a/test/auth-tests/pom.xml +++ b/test/auth-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 04096bebb58..d6f294dfdc0 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java index c2b701217cf..583f1ae1a30 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/CoreMetricsTest.java @@ -24,8 +24,10 @@ import 
java.io.ByteArrayInputStream; import java.io.IOException; +import java.net.URI; import java.time.Duration; import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; import org.junit.After; import org.junit.Before; @@ -41,6 +43,7 @@ import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.metrics.CoreMetric; import software.amazon.awssdk.core.internal.metrics.SdkErrorType; +import software.amazon.awssdk.endpoints.Endpoint; import software.amazon.awssdk.http.AbortableInputStream; import software.amazon.awssdk.http.ExecutableHttpRequest; import software.amazon.awssdk.http.HttpExecuteRequest; @@ -52,6 +55,8 @@ import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonClient; +import software.amazon.awssdk.services.protocolrestjson.endpoints.ProtocolRestJsonEndpointParams; +import software.amazon.awssdk.services.protocolrestjson.endpoints.ProtocolRestJsonEndpointProvider; import software.amazon.awssdk.services.protocolrestjson.model.EmptyModeledException; import software.amazon.awssdk.services.protocolrestjson.model.SimpleStruct; import software.amazon.awssdk.services.protocolrestjson.paginators.PaginatedOperationWithResultKeyIterable; @@ -77,6 +82,9 @@ public class CoreMetricsTest { @Mock private MetricPublisher mockPublisher; + @Mock + private ProtocolRestJsonEndpointProvider mockEndpointProvider; + @Before public void setup() throws IOException { client = ProtocolRestJsonClient.builder() @@ -84,6 +92,7 @@ public void setup() throws IOException { .region(Region.US_WEST_2) .credentialsProvider(mockCredentialsProvider) .overrideConfiguration(c -> c.addMetricPublisher(mockPublisher).retryPolicy(b -> b.numRetries(MAX_RETRIES))) + .endpointProvider(mockEndpointProvider) .build(); AbortableInputStream content = contentStream("{}"); SdkHttpFullResponse httpResponse = 
SdkHttpFullResponse.builder() @@ -116,6 +125,11 @@ public void setup() throws IOException { } return AwsBasicCredentials.create("foo", "bar"); }); + + when(mockEndpointProvider.resolveEndpoint(any(ProtocolRestJsonEndpointParams.class))).thenReturn( + CompletableFuture.completedFuture(Endpoint.builder() + .url(URI.create("https://protocolrestjson.amazonaws.com")) + .build())); } @After @@ -183,6 +197,8 @@ public void testApiCall_operationSuccessful_addsMetrics() { assertThat(capturedCollection.metricValues(CoreMetric.MARSHALLING_DURATION).get(0)) .isGreaterThanOrEqualTo(Duration.ZERO); assertThat(capturedCollection.metricValues(CoreMetric.RETRY_COUNT)).containsExactly(0); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ENDPOINT).get(0)).isEqualTo(URI.create( + "https://protocolrestjson.amazonaws.com")); assertThat(capturedCollection.children()).hasSize(1); MetricCollection attemptCollection = capturedCollection.children().get(0); @@ -280,6 +296,24 @@ public void testApiCall_httpClientThrowsNetworkError_errorTypeIncludedInMetrics( } } + @Test + public void testApiCall_endpointProviderAddsPathQueryFragment_notReportedInServiceEndpointMetric() { + when(mockEndpointProvider.resolveEndpoint(any(ProtocolRestJsonEndpointParams.class))) + .thenReturn(CompletableFuture.completedFuture(Endpoint.builder() + .url(URI.create("https://protocolrestjson.amazonaws.com:8080/foo?bar#baz")) + .build())); + + client.allTypes(); + + ArgumentCaptor collectionCaptor = ArgumentCaptor.forClass(MetricCollection.class); + verify(mockPublisher).publish(collectionCaptor.capture()); + + MetricCollection capturedCollection = collectionCaptor.getValue(); + + URI expectedServiceEndpoint = URI.create("https://protocolrestjson.amazonaws.com:8080"); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ENDPOINT)).containsExactly(expectedServiceEndpoint); + } + private static HttpExecuteResponse mockExecuteResponse(SdkHttpFullResponse httpResponse) { HttpExecuteResponse 
mockResponse = mock(HttpExecuteResponse.class); diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java index 4ed2df722d6..2054a7ea5d3 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/metrics/async/BaseAsyncCoreMetricsTest.java @@ -203,6 +203,8 @@ private void verifyApiCallCollection(MetricCollection capturedCollection) { .isGreaterThanOrEqualTo(Duration.ZERO); assertThat(capturedCollection.metricValues(CoreMetric.API_CALL_DURATION).get(0)) .isGreaterThan(FIXED_DELAY); + assertThat(capturedCollection.metricValues(CoreMetric.SERVICE_ENDPOINT).get(0)).toString() + .startsWith("http://localhost"); } void stubSuccessfulResponse() { diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index abfc2e70578..5e23aecb4fe 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index 2418d405222..acc2d08690a 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index 9a2606614d5..b56cb0c6c91 100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 diff --git 
a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index ee1b2a0cb04..dffe4b2b773 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/region-testing/pom.xml b/test/region-testing/pom.xml index 89182d40cbc..5acbca22ef0 100644 --- a/test/region-testing/pom.xml +++ b/test/region-testing/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/ruleset-testing-core/pom.xml b/test/ruleset-testing-core/pom.xml index f3716ce9e82..e0138514547 100644 --- a/test/ruleset-testing-core/pom.xml +++ b/test/ruleset-testing-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/s3-benchmarks/.scripts/benchmark b/test/s3-benchmarks/.scripts/benchmark index 4fb56f0d443..5eaa2182d03 100755 --- a/test/s3-benchmarks/.scripts/benchmark +++ b/test/s3-benchmarks/.scripts/benchmark @@ -64,7 +64,7 @@ if [ ! 
-d result ]; then fi sizes_str="1B 8MB+1 8MB-1 128MB 4GB 30GB" -versions_str="v1 v2 CRT" +versions_str="v1 v2 CRT java" sizes=( $sizes_str ) versions=( $versions_str ) diff --git a/test/s3-benchmarks/.scripts/create_benchmark_files b/test/s3-benchmarks/.scripts/create_benchmark_files index a43725314cf..20f196a6bd8 100755 --- a/test/s3-benchmarks/.scripts/create_benchmark_files +++ b/test/s3-benchmarks/.scripts/create_benchmark_files @@ -1,14 +1,13 @@ -head -c 1B /dev/shm/1B -head -c 8388607B /dev/shm/8MB-1 -head -c 8388609B /dev/shm/8MB+1 -head -c 128M /dev/shm/128MB -head -c 4B /dev/shm/4GB -head -c 30GB /dev/shm/30GB - -head -c 1B /1B -head -c 8388607B /8MB-1 -head -c 8388609B /8MB+1 -head -c 128M /128MB -head -c 4B /4GB -head -c 30GB /30GB +head -c 1 /dev/shm/1B +head -c $((8*1024*1024-1)) /dev/shm/8MB-1 +head -c $((8*1024*1024+1)) /dev/shm/8MB+1 +head -c $((128*1024*1024)) /dev/shm/128MB +head -c $((4*1024*1024*1024)) /dev/shm/4GB +head -c $((30*1024*1024*1024)) /dev/shm/30GB +head -c 1 /1B +head -c $((8*1024*1024-1)) /8MB-1 +head -c $((8*1024*1024+1)) /8MB+1 +head -c $((128*1024*1024)) /128MB +head -c $((4*1024*1024*1024)) /4GB +head -c $((30*1024*1024*1024)) /30GB diff --git a/test/s3-benchmarks/README.md b/test/s3-benchmarks/README.md index 74a7436ba92..5f0dc4bc644 100755 --- a/test/s3-benchmarks/README.md +++ b/test/s3-benchmarks/README.md @@ -1,7 +1,6 @@ # S3 Benchmark Harness - -This module contains performance tests for `S3AsyncClient` and +This module contains performance tests for `S3AsyncClient` and `S3TransferManager` ## How to run @@ -17,6 +16,31 @@ java -jar s3-benchmarks.jar --bucket=bucket --key=key -file=/path/to/destionfile java -jar s3-benchmarks.jar --bucket=bucket --key=key -file=/path/to/sourcefile/ --operation=upload --partSizeInMB=20 --maxThroughput=100.0 ``` +## Command line arguments + +### Benchmark version + +The `--version` command line option is used to determine which component is under test: + +- `--version=crt` : Indicate to run 
the benchmark for the CRT's S3Client +- `--version=java` : Indicate to run the benchmark for the java based S3 Async Client (`MultipartS3AsyncClient` class) +- `--version=v2`: SDK v2 transfer manager (using `S3CrtAsyncClient` to delegate requests) +- `--version=v1`: SDK v1 transfer manager (using `AmazonS3Client` to delegate requests) + +### Operation + +The `--operation` command line argument determine which transfer operation is used + +|operation|supported version| +|---|-------| +|download | v1 v2 java crt | +|upload | v1 v2 java crt | +|download_directory | v1 v2 | +|upload_directory | v1 v2 | +|copy | v1 v2 java | + +> All command line argument can be found in the `BenchmarkRunner` class. + # Benchmark scripts Automation From the `.script` folder, use one of the `benchamrk` scripts to run a test suite. diff --git a/test/s3-benchmarks/pom.xml b/test/s3-benchmarks/pom.xml index 366bfa81ff6..c16621ee295 100644 --- a/test/s3-benchmarks/pom.xml +++ b/test/s3-benchmarks/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 @@ -87,6 +87,16 @@ log4j-slf4j-impl compile + + software.amazon.awssdk + netty-nio-client + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-crt-client + ${awsjavasdk.version} + diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseJavaS3ClientBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseJavaS3ClientBenchmark.java new file mode 100644 index 00000000000..cf8e71246be --- /dev/null +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseJavaS3ClientBenchmark.java @@ -0,0 +1,130 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.s3benchmarks; + +import static software.amazon.awssdk.s3benchmarks.BenchmarkUtils.BENCHMARK_ITERATIONS; +import static software.amazon.awssdk.s3benchmarks.BenchmarkUtils.DEFAULT_TIMEOUT; +import static software.amazon.awssdk.s3benchmarks.BenchmarkUtils.printOutResult; +import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.crt.AwsCrtAsyncHttpClient; +import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +public abstract class BaseJavaS3ClientBenchmark implements TransferManagerBenchmark { + private static final Logger logger = Logger.loggerFor(BaseJavaS3ClientBenchmark.class); + + protected final S3Client s3Client; + + protected final S3AsyncClient s3AsyncClient; + protected final String bucket; + protected final String key; + protected final Duration timeout; + private final ChecksumAlgorithm checksumAlgorithm; + private final int iteration; + + protected BaseJavaS3ClientBenchmark(TransferManagerBenchmarkConfig config) { + this.bucket = Validate.paramNotNull(config.bucket(), "bucket"); + this.key = Validate.paramNotNull(config.key(), "key"); + this.timeout = 
Validate.getOrDefault(config.timeout(), () -> DEFAULT_TIMEOUT); + this.iteration = Validate.getOrDefault(config.iteration(), () -> BENCHMARK_ITERATIONS); + this.checksumAlgorithm = config.checksumAlgorithm(); + + this.s3Client = S3Client.create(); + + long partSizeInMb = Validate.paramNotNull(config.partSizeInMb(), "partSize"); + long readBufferInMb = Validate.paramNotNull(config.readBufferSizeInMb(), "readBufferSizeInMb"); + Validate.mutuallyExclusive("cannot use forceCrtHttpClient and connectionAcquisitionTimeoutInSec", + config.forceCrtHttpClient(), config.connectionAcquisitionTimeoutInSec()); + this.s3AsyncClient = S3AsyncClient.builder() + .multipartEnabled(true) + .multipartConfiguration(c -> c.minimumPartSizeInBytes(partSizeInMb * MB) + .thresholdInBytes(partSizeInMb * 2 * MB) + .apiCallBufferSizeInBytes(readBufferInMb * MB)) + .httpClientBuilder(httpClient(config)) + .build(); + } + + private SdkAsyncHttpClient.Builder httpClient(TransferManagerBenchmarkConfig config) { + if (config.forceCrtHttpClient()) { + logger.info(() -> "Using CRT HTTP client"); + AwsCrtAsyncHttpClient.Builder builder = AwsCrtAsyncHttpClient.builder(); + if (config.readBufferSizeInMb() != null) { + builder.readBufferSizeInBytes(config.readBufferSizeInMb() * MB); + } + if (config.maxConcurrency() != null) { + builder.maxConcurrency(config.maxConcurrency()); + } + return builder; + } + NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder(); + if (config.connectionAcquisitionTimeoutInSec() != null) { + Duration connAcqTimeout = Duration.ofSeconds(config.connectionAcquisitionTimeoutInSec()); + builder.connectionAcquisitionTimeout(connAcqTimeout); + } + if (config.maxConcurrency() != null) { + builder.maxConcurrency(config.maxConcurrency()); + } + return builder; + } + + protected abstract void sendOneRequest(List latencies) throws Exception; + + protected abstract long contentLength() throws Exception; + + @Override + public void run() { + try { + warmUp(); + 
doRunBenchmark(); + } catch (Exception e) { + logger.error(() -> "Exception occurred", e); + } finally { + cleanup(); + } + } + + private void cleanup() { + s3Client.close(); + s3AsyncClient.close(); + } + + private void warmUp() throws Exception { + logger.info(() -> "Starting to warm up"); + for (int i = 0; i < 3; i++) { + sendOneRequest(new ArrayList<>()); + Thread.sleep(500); + } + logger.info(() -> "Ending warm up"); + } + + private void doRunBenchmark() throws Exception { + List metrics = new ArrayList<>(); + for (int i = 0; i < iteration; i++) { + sendOneRequest(metrics); + } + printOutResult(metrics, "S3 Async client", contentLength()); + } + +} diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseTransferManagerBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseTransferManagerBenchmark.java index c4370033c67..d40680bca02 100644 --- a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseTransferManagerBenchmark.java +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BaseTransferManagerBenchmark.java @@ -34,6 +34,7 @@ import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.S3CrtAsyncClientBuilder; import software.amazon.awssdk.services.s3.internal.crt.S3CrtAsyncClient; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; @@ -61,15 +62,18 @@ public abstract class BaseTransferManagerBenchmark implements TransferManagerBen logger.info(() -> "Benchmark config: " + config); Long partSizeInMb = config.partSizeInMb() == null ? null : config.partSizeInMb() * MB; Long readBufferSizeInMb = config.readBufferSizeInMb() == null ? 
null : config.readBufferSizeInMb() * MB; - s3 = S3CrtAsyncClient.builder() - .targetThroughputInGbps(config.targetThroughput()) - .minimumPartSizeInBytes(partSizeInMb) - .initialReadBufferSizeInBytes(readBufferSizeInMb) - .targetThroughputInGbps(config.targetThroughput() == null ? - Double.valueOf(100.0) : config.targetThroughput()) - .build(); - s3Sync = S3Client.builder() - .build(); + S3CrtAsyncClientBuilder builder = S3CrtAsyncClient.builder() + .targetThroughputInGbps(config.targetThroughput()) + .minimumPartSizeInBytes(partSizeInMb) + .initialReadBufferSizeInBytes(readBufferSizeInMb) + .targetThroughputInGbps(config.targetThroughput() == null ? + Double.valueOf(100.0) : + config.targetThroughput()); + if (config.maxConcurrency() != null) { + builder.maxConcurrency(config.maxConcurrency()); + } + s3 = builder.build(); + s3Sync = S3Client.builder().build(); transferManager = S3TransferManager.builder() .s3Client(s3) .build(); diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BenchmarkRunner.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BenchmarkRunner.java index 6f1dfaecc70..d83fc87026a 100644 --- a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BenchmarkRunner.java +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/BenchmarkRunner.java @@ -45,6 +45,11 @@ public final class BenchmarkRunner { private static final String TIMEOUT = "timeoutInMin"; + private static final String CONN_ACQ_TIMEOUT_IN_SEC = "connAcqTimeoutInSec"; + + private static final String FORCE_CRT_HTTP_CLIENT = "crtHttp"; + private static final String MAX_CONCURRENCY = "maxConcurrency"; + private static final Map> OPERATION_TO_BENCHMARK_V1 = new EnumMap<>(TransferManagerOperation.class); private static final Map> @@ -83,8 +88,8 @@ public static void main(String... 
args) throws org.apache.commons.cli.ParseExcep options.addOption(null, CHECKSUM_ALGORITHM, true, "The checksum algorithm to use"); options.addOption(null, ITERATION, true, "The number of iterations"); options.addOption(null, READ_BUFFER_IN_MB, true, "Read buffer size in MB"); - options.addOption(null, VERSION, true, "The major version of the transfer manager to run test: v1 | v2 | crt, default: " - + "v2"); + options.addOption(null, VERSION, true, "The major version of the transfer manager to run test: " + + "v1 | v2 | crt | java, default: v2"); options.addOption(null, PREFIX, true, "S3 Prefix used in downloadDirectory and uploadDirectory"); options.addOption(null, CONTENT_LENGTH, true, "Content length to upload from memory. Used only in the " @@ -93,6 +98,12 @@ public static void main(String... args) throws org.apache.commons.cli.ParseExcep options.addOption(null, TIMEOUT, true, "Amount of minute to wait before a single operation " + "times out and is cancelled. Optional, defaults to 10 minutes if no specified"); + options.addOption(null, CONN_ACQ_TIMEOUT_IN_SEC, true, "Timeout for acquiring an already-established" + + " connection from a connection pool to a remote service."); + options.addOption(null, FORCE_CRT_HTTP_CLIENT, true, + "Force the CRT http client to be used in JavaBased benchmarks"); + options.addOption(null, MAX_CONCURRENCY, true, + "The Maximum number of allowed concurrent requests. For HTTP/1.1 this is the same as max connections."); CommandLine cmd = parser.parse(options, args); TransferManagerBenchmarkConfig config = parseConfig(cmd); @@ -114,11 +125,22 @@ public static void main(String... 
args) throws org.apache.commons.cli.ParseExcep if (operation == TransferManagerOperation.DOWNLOAD) { benchmark = new CrtS3ClientDownloadBenchmark(config); break; - } else if (operation == TransferManagerOperation.UPLOAD) { + } + if (operation == TransferManagerOperation.UPLOAD) { benchmark = new CrtS3ClientUploadBenchmark(config); break; } throw new UnsupportedOperationException(); + case JAVA: + if (operation == TransferManagerOperation.UPLOAD) { + benchmark = new JavaS3ClientUploadBenchmark(config); + break; + } + if (operation == TransferManagerOperation.COPY) { + benchmark = new JavaS3ClientCopyBenchmark(config); + break; + } + throw new UnsupportedOperationException("Java based s3 client benchmark only support upload and copy"); default: throw new UnsupportedOperationException(); } @@ -158,6 +180,15 @@ private static TransferManagerBenchmarkConfig parseConfig(CommandLine cmd) { Duration timeout = cmd.getOptionValue(TIMEOUT) == null ? null : Duration.ofMinutes(Long.parseLong(cmd.getOptionValue(TIMEOUT))); + Long connAcqTimeoutInSec = cmd.getOptionValue(CONN_ACQ_TIMEOUT_IN_SEC) == null ? null : + Long.parseLong(cmd.getOptionValue(CONN_ACQ_TIMEOUT_IN_SEC)); + + Boolean forceCrtHttpClient = cmd.getOptionValue(FORCE_CRT_HTTP_CLIENT) != null + && Boolean.parseBoolean(cmd.getOptionValue(FORCE_CRT_HTTP_CLIENT)); + + Integer maxConcurrency = cmd.getOptionValue(MAX_CONCURRENCY) == null ? 
null : + Integer.parseInt(cmd.getOptionValue(MAX_CONCURRENCY)); + return TransferManagerBenchmarkConfig.builder() .key(key) .bucket(bucket) @@ -171,6 +202,9 @@ private static TransferManagerBenchmarkConfig parseConfig(CommandLine cmd) { .prefix(prefix) .contentLengthInMb(contentLengthInMb) .timeout(timeout) + .connectionAcquisitionTimeoutInSec(connAcqTimeoutInSec) + .forceCrtHttpClient(forceCrtHttpClient) + .maxConcurrency(maxConcurrency) .build(); } @@ -185,6 +219,7 @@ public enum TransferManagerOperation { private enum SdkVersion { V1, V2, - CRT + CRT, + JAVA } } diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientCopyBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientCopyBenchmark.java new file mode 100644 index 00000000000..a2798a0e9cf --- /dev/null +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientCopyBenchmark.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.s3benchmarks; + +import static software.amazon.awssdk.s3benchmarks.BenchmarkUtils.COPY_SUFFIX; + +import java.util.List; +import software.amazon.awssdk.utils.Logger; + +public class JavaS3ClientCopyBenchmark extends BaseJavaS3ClientBenchmark { + private static final Logger log = Logger.loggerFor(JavaS3ClientCopyBenchmark.class); + + public JavaS3ClientCopyBenchmark(TransferManagerBenchmarkConfig config) { + super(config); + } + + @Override + protected void sendOneRequest(List latencies) throws Exception { + log.info(() -> "Starting copy"); + Double latency = runWithTime(s3AsyncClient.copyObject( + req -> req.sourceKey(key).sourceBucket(bucket) + .destinationBucket(bucket).destinationKey(key + COPY_SUFFIX) + )::join).latency(); + latencies.add(latency); + } + + @Override + protected long contentLength() throws Exception { + return s3Client.headObject(b -> b.bucket(bucket).key(key)).contentLength(); + } +} diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientUploadBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientUploadBenchmark.java new file mode 100644 index 00000000000..07aec5448a4 --- /dev/null +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/JavaS3ClientUploadBenchmark.java @@ -0,0 +1,95 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.s3benchmarks; + +import static software.amazon.awssdk.transfer.s3.SizeConstant.MB; + +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.utils.async.SimplePublisher; + +public class JavaS3ClientUploadBenchmark extends BaseJavaS3ClientBenchmark { + + private final String filePath; + private final Long contentLengthInMb; + private final Long partSizeInMb; + private final ChecksumAlgorithm checksumAlgorithm; + + public JavaS3ClientUploadBenchmark(TransferManagerBenchmarkConfig config) { + super(config); + this.filePath = config.filePath(); + this.contentLengthInMb = config.contentLengthInMb(); + this.partSizeInMb = config.partSizeInMb(); + this.checksumAlgorithm = config.checksumAlgorithm(); + } + + @Override + protected void sendOneRequest(List latencies) throws Exception { + if (filePath == null) { + double latency = uploadFromMemory(); + latencies.add(latency); + return; + } + Double latency = runWithTime( + s3AsyncClient.putObject(req -> req.key(key).bucket(bucket).checksumAlgorithm(checksumAlgorithm), + Paths.get(filePath))::join).latency(); + latencies.add(latency); + } + + private double uploadFromMemory() throws Exception { + if (contentLengthInMb == null) { + throw new UnsupportedOperationException("Java upload benchmark - contentLengthInMb required for upload from memory"); + } + long partSizeInBytes = partSizeInMb * MB; + // upload using known content length + SimplePublisher publisher = new SimplePublisher<>(); + byte[] bytes = new byte[(int) partSizeInBytes]; + Thread uploadThread = 
Executors.defaultThreadFactory().newThread(() -> { + long remaining = contentLengthInMb * MB; + while (remaining > 0) { + publisher.send(ByteBuffer.wrap(bytes)); + remaining -= partSizeInBytes; + } + publisher.complete(); + }); + CompletableFuture responseFuture = + s3AsyncClient.putObject(r -> r.bucket(bucket) + .key(key) + .contentLength(contentLengthInMb * MB) + .checksumAlgorithm(checksumAlgorithm), + AsyncRequestBody.fromPublisher(publisher)); + uploadThread.start(); + long start = System.currentTimeMillis(); + responseFuture.get(timeout.getSeconds(), TimeUnit.SECONDS); + long end = System.currentTimeMillis(); + return (end - start) / 1000.0; + } + + @Override + protected long contentLength() throws Exception { + return filePath != null + ? Files.size(Paths.get(filePath)) + : contentLengthInMb * MB; + } +} diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmark.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmark.java index 89f3362bc65..c182934f4e3 100644 --- a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmark.java +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmark.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.s3benchmarks; +import java.util.function.Supplier; + /** * Factory to create the benchmark */ @@ -66,4 +68,29 @@ static TransferManagerBenchmark v1Copy(TransferManagerBenchmarkConfig config) { return new V1TransferManagerCopyBenchmark(config); } + default TimedResult runWithTime(Supplier toRun) { + long start = System.currentTimeMillis(); + T result = toRun.get(); + long end = System.currentTimeMillis(); + return new TimedResult<>(result, (end - start) / 1000.0); + } + + final class TimedResult { + private final Double latency; + private final T result; + + public TimedResult(T result, Double latency) { + this.result = result; + this.latency = latency; + } + 
+ public Double latency() { + return latency; + } + + public T result() { + return result; + } + + } } diff --git a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmarkConfig.java b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmarkConfig.java index b0a6f85a38c..a3750f47249 100644 --- a/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmarkConfig.java +++ b/test/s3-benchmarks/src/main/java/software/amazon/awssdk/s3benchmarks/TransferManagerBenchmarkConfig.java @@ -17,6 +17,7 @@ import java.time.Duration; import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm; +import software.amazon.awssdk.utils.ToString; public final class TransferManagerBenchmarkConfig { private final String filePath; @@ -28,6 +29,10 @@ public final class TransferManagerBenchmarkConfig { private final Integer iteration; private final Long contentLengthInMb; private final Duration timeout; + private final Long memoryUsageInMb; + private final Long connectionAcquisitionTimeoutInSec; + private final Boolean forceCrtHttpClient; + private final Integer maxConcurrency; private final Long readBufferSizeInMb; private final BenchmarkRunner.TransferManagerOperation operation; @@ -46,6 +51,10 @@ private TransferManagerBenchmarkConfig(Builder builder) { this.prefix = builder.prefix; this.contentLengthInMb = builder.contentLengthInMb; this.timeout = builder.timeout; + this.memoryUsageInMb = builder.memoryUsage; + this.connectionAcquisitionTimeoutInSec = builder.connectionAcquisitionTimeoutInSec; + this.forceCrtHttpClient = builder.forceCrtHttpClient; + this.maxConcurrency = builder.maxConcurrency; } public String filePath() { @@ -96,25 +105,46 @@ public Duration timeout() { return this.timeout; } + public Long memoryUsageInMb() { + return this.memoryUsageInMb; + } + + public Long connectionAcquisitionTimeoutInSec() { + return this.connectionAcquisitionTimeoutInSec; + } + + 
public boolean forceCrtHttpClient() { + return this.forceCrtHttpClient; + } + + public Integer maxConcurrency() { + return this.maxConcurrency; + } + public static Builder builder() { return new Builder(); } @Override public String toString() { - return "{" + - "filePath: '" + filePath + '\'' + - ", bucket: '" + bucket + '\'' + - ", key: '" + key + '\'' + - ", targetThroughput: " + targetThroughput + - ", partSizeInMb: " + partSizeInMb + - ", checksumAlgorithm: " + checksumAlgorithm + - ", iteration: " + iteration + - ", readBufferSizeInMb: " + readBufferSizeInMb + - ", operation: " + operation + - ", contentLengthInMb: " + contentLengthInMb + - ", timeout:" + timeout + - '}'; + return ToString.builder("TransferManagerBenchmarkConfig") + .add("filePath", filePath) + .add("bucket", bucket) + .add("key", key) + .add("targetThroughput", targetThroughput) + .add("partSizeInMb", partSizeInMb) + .add("checksumAlgorithm", checksumAlgorithm) + .add("iteration", iteration) + .add("contentLengthInMb", contentLengthInMb) + .add("timeout", timeout) + .add("memoryUsageInMb", memoryUsageInMb) + .add("connectionAcquisitionTimeoutInSec", connectionAcquisitionTimeoutInSec) + .add("forceCrtHttpClient", forceCrtHttpClient) + .add("maxConcurrency", maxConcurrency) + .add("readBufferSizeInMb", readBufferSizeInMb) + .add("operation", operation) + .add("prefix", prefix) + .build(); } static final class Builder { @@ -126,6 +156,10 @@ static final class Builder { private Double targetThroughput; private Long partSizeInMb; private Long contentLengthInMb; + private Long memoryUsage; + private Long connectionAcquisitionTimeoutInSec; + private Boolean forceCrtHttpClient; + private Integer maxConcurrency; private Integer iteration; private BenchmarkRunner.TransferManagerOperation operation; @@ -193,6 +227,26 @@ public Builder timeout(Duration timeout) { return this; } + public Builder memoryUsageInMb(Long memoryUsage) { + this.memoryUsage = memoryUsage; + return this; + } + + public Builder 
connectionAcquisitionTimeoutInSec(Long connectionAcquisitionTimeoutInSec) { + this.connectionAcquisitionTimeoutInSec = connectionAcquisitionTimeoutInSec; + return this; + } + + public Builder forceCrtHttpClient(Boolean forceCrtHttpClient) { + this.forceCrtHttpClient = forceCrtHttpClient; + return this; + } + + public Builder maxConcurrency(Integer maxConcurrency) { + this.maxConcurrency = maxConcurrency; + return this; + } + public TransferManagerBenchmarkConfig build() { return new TransferManagerBenchmarkConfig(this); } diff --git a/test/s3-benchmarks/src/main/resources/log4j2.properties b/test/s3-benchmarks/src/main/resources/log4j2.properties index 58a399c44f1..e4d18ecc6ea 100644 --- a/test/s3-benchmarks/src/main/resources/log4j2.properties +++ b/test/s3-benchmarks/src/main/resources/log4j2.properties @@ -43,3 +43,6 @@ rootLogger.appenderRef.file.ref = FileAppender # #logger.netty.name = io.netty.handler.logging #logger.netty.level = debug + +#logger.s3mpu.name = software.amazon.awssdk.services.s3.internal.multipart +#logger.s3mpu.level = debug \ No newline at end of file diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index c2ad4ff9dc7..32cd1ea89d7 100644 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml @@ -205,6 +205,11 @@ ${awsjavasdk.version} compile + + commons-cli + commons-cli + compile + @@ -368,6 +373,8 @@ -classpath software.amazon.awssdk.benchmark.BenchmarkRunner + + -c diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkResultProcessor.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkResultProcessor.java index 580471fa1a3..938aa0de3c0 100644 --- a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkResultProcessor.java +++ 
b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkResultProcessor.java @@ -24,6 +24,7 @@ import java.net.URL; import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -34,6 +35,7 @@ import software.amazon.awssdk.benchmark.stats.SdkBenchmarkParams; import software.amazon.awssdk.benchmark.stats.SdkBenchmarkResult; import software.amazon.awssdk.benchmark.stats.SdkBenchmarkStatistics; +import software.amazon.awssdk.benchmark.utils.BenchmarkProcessorOutput; import software.amazon.awssdk.utils.Logger; @@ -66,15 +68,18 @@ class BenchmarkResultProcessor { * Process benchmark results * * @param results the results of the benchmark - * @return the benchmark Id that failed the regression + * @return the benchmark results */ - List processBenchmarkResult(Collection results) { - List currentData = new ArrayList<>(); + BenchmarkProcessorOutput processBenchmarkResult(Collection results) { + Map benchmarkResults = new HashMap<>(); + for (RunResult result : results) { String benchmarkId = getBenchmarkId(result.getParams()); + SdkBenchmarkResult sdkBenchmarkData = constructSdkBenchmarkResult(result); + + benchmarkResults.put(benchmarkId, sdkBenchmarkData); SdkBenchmarkResult baselineResult = baseline.get(benchmarkId); - SdkBenchmarkResult sdkBenchmarkData = constructSdkBenchmarkResult(result); if (baselineResult == null) { log.warn(() -> { @@ -90,15 +95,14 @@ List processBenchmarkResult(Collection results) { continue; } - currentData.add(sdkBenchmarkData); - if (!validateBenchmarkResult(sdkBenchmarkData, baselineResult)) { failedBenchmarkIds.add(benchmarkId); } } - log.info(() -> "Current result: " + serializeResult(currentData)); - return failedBenchmarkIds; + BenchmarkProcessorOutput output = new BenchmarkProcessorOutput(benchmarkResults, failedBenchmarkIds); + log.info(() -> "Current result: " + serializeResult(output)); + return output; } private 
SdkBenchmarkResult constructSdkBenchmarkResult(RunResult runResult) { @@ -169,9 +173,9 @@ private boolean validateBenchmarkParams(SdkBenchmarkParams current, SdkBenchmark return current.getMode() == baseline.getMode(); } - private String serializeResult(List currentData) { + private String serializeResult(BenchmarkProcessorOutput processorOutput) { try { - return OBJECT_MAPPER.writeValueAsString(currentData); + return OBJECT_MAPPER.writeValueAsString(processorOutput); } catch (JsonProcessingException e) { log.error(() -> "Failed to serialize current result", e); } diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkRunner.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkRunner.java index 92ca28d12ac..4c49f0270a8 100644 --- a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkRunner.java +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/BenchmarkRunner.java @@ -15,11 +15,23 @@ package software.amazon.awssdk.benchmark; -import com.fasterxml.jackson.core.JsonProcessingException; +import static software.amazon.awssdk.benchmark.utils.BenchmarkConstant.OBJECT_MAPPER; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.stream.Collectors; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.DefaultParser; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; import org.openjdk.jmh.results.RunResult; import org.openjdk.jmh.runner.Runner; import org.openjdk.jmh.runner.RunnerException; @@ -45,6 +57,8 @@ import software.amazon.awssdk.benchmark.enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark; import 
software.amazon.awssdk.benchmark.enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark; import software.amazon.awssdk.benchmark.enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark; +import software.amazon.awssdk.benchmark.stats.SdkBenchmarkResult; +import software.amazon.awssdk.benchmark.utils.BenchmarkProcessorOutput; import software.amazon.awssdk.utils.Logger; @@ -84,13 +98,15 @@ public class BenchmarkRunner { private final List benchmarksToRun; private final BenchmarkResultProcessor resultProcessor; + private final BenchmarkRunnerOptions options; - private BenchmarkRunner(List benchmarksToRun) { + private BenchmarkRunner(List benchmarksToRun, BenchmarkRunnerOptions options) { this.benchmarksToRun = benchmarksToRun; this.resultProcessor = new BenchmarkResultProcessor(); + this.options = options; } - public static void main(String... args) throws RunnerException, JsonProcessingException { + public static void main(String... args) throws Exception { List benchmarksToRun = new ArrayList<>(); benchmarksToRun.addAll(SYNC_BENCHMARKS); benchmarksToRun.addAll(ASYNC_BENCHMARKS); @@ -99,13 +115,14 @@ public static void main(String... 
args) throws RunnerException, JsonProcessingEx log.info(() -> "Skipping tests, to reduce benchmark times: \n" + MAPPER_BENCHMARKS + "\n" + METRIC_BENCHMARKS); - - BenchmarkRunner runner = new BenchmarkRunner(benchmarksToRun); + BenchmarkRunner runner = new BenchmarkRunner(benchmarksToRun, parseOptions(args)); runner.runBenchmark(); } private void runBenchmark() throws RunnerException { + log.info(() -> "Running with options: " + options); + ChainedOptionsBuilder optionsBuilder = new OptionsBuilder(); benchmarksToRun.forEach(optionsBuilder::include); @@ -114,11 +131,70 @@ private void runBenchmark() throws RunnerException { Collection results = new Runner(optionsBuilder.build()).run(); - List failedResult = resultProcessor.processBenchmarkResult(results); + BenchmarkProcessorOutput processedResults = resultProcessor.processBenchmarkResult(results); + List failedResults = processedResults.getFailedBenchmarks(); + + if (options.outputPath != null) { + log.info(() -> "Writing results to " + options.outputPath); + writeResults(processedResults, options.outputPath); + } + + if (options.check && !failedResults.isEmpty()) { + log.info(() -> "Failed perf regression tests: " + failedResults); + throw new RuntimeException("Perf regression tests failed: " + failedResults); + } + } + + private static BenchmarkRunnerOptions parseOptions(String[] args) throws ParseException { + Options cliOptions = new Options(); + cliOptions.addOption("o", "output", true, + "The path to write the benchmark results to."); + cliOptions.addOption("c", "check", false, + "If specified, exit with error code 1 if the results are not within the baseline."); + + CommandLineParser parser = new DefaultParser(); + CommandLine cmdLine = parser.parse(cliOptions, args); + + BenchmarkRunnerOptions options = new BenchmarkRunnerOptions() + .check(cmdLine.hasOption("c")); + + if (cmdLine.hasOption("o")) { + options.outputPath(Paths.get(cmdLine.getOptionValue("o"))); + } + + return options; + } + + private static 
void writeResults(BenchmarkProcessorOutput output, Path outputPath) { + List results = output.getBenchmarkResults().values().stream().collect(Collectors.toList()); + try (OutputStream os = Files.newOutputStream(outputPath)) { + OBJECT_MAPPER.writeValue(os, results); + } catch (IOException e) { + log.error(() -> "Failed to write the results to " + outputPath, e); + throw new RuntimeException(e); + } + } + + private static class BenchmarkRunnerOptions { + private Path outputPath; + private boolean check; + + public BenchmarkRunnerOptions outputPath(Path outputPath) { + this.outputPath = outputPath; + return this; + } + + public BenchmarkRunnerOptions check(boolean check) { + this.check = check; + return this; + } - if (!failedResult.isEmpty()) { - log.info(() -> "Failed perf regression tests: " + failedResult); - throw new RuntimeException("Perf regression tests failed: " + failedResult); + @Override + public String toString() { + return "BenchmarkRunnerOptions{" + + "outputPath=" + outputPath + + ", check=" + check + + '}'; } } } diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/stats/SdkBenchmarkParams.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/stats/SdkBenchmarkParams.java index f419405bb69..c04bcc9dca5 100644 --- a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/stats/SdkBenchmarkParams.java +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/stats/SdkBenchmarkParams.java @@ -17,7 +17,6 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.DeserializationContext; import com.fasterxml.jackson.databind.JsonDeserializer; import com.fasterxml.jackson.databind.JsonSerializer; @@ -25,7 +24,7 @@ import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import 
java.io.IOException; -import java.time.LocalDateTime; +import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import org.openjdk.jmh.annotations.Mode; import org.openjdk.jmh.infra.BenchmarkParams; @@ -46,9 +45,9 @@ public class SdkBenchmarkParams { private Mode mode; - @JsonSerialize(using = LocalDateSerializer.class) - @JsonDeserialize(using = LocalDateDeserializer.class) - private LocalDateTime date; + @JsonSerialize(using = ZonedDateSerializer.class) + @JsonDeserialize(using = ZonedDateDeserializer.class) + private ZonedDateTime date; public SdkBenchmarkParams() { } @@ -59,7 +58,7 @@ public SdkBenchmarkParams(BenchmarkParams benchmarkParams) { this.jvmName = benchmarkParams.getVmName(); this.jvmVersion = benchmarkParams.getVmVersion(); this.mode = benchmarkParams.getMode(); - this.date = LocalDateTime.now(); + this.date = ZonedDateTime.now(); } public String getSdkVersion() { @@ -94,11 +93,11 @@ public void setJvmVersion(String jvmVersion) { this.jvmVersion = jvmVersion; } - public LocalDateTime getDate() { + public ZonedDateTime getDate() { return date; } - public void setDate(LocalDateTime date) { + public void setDate(ZonedDateTime date) { this.date = date; } @@ -110,18 +109,18 @@ public void setMode(Mode mode) { this.mode = mode; } - private static class LocalDateSerializer extends JsonSerializer { + private static class ZonedDateSerializer extends JsonSerializer { @Override - public void serialize(LocalDateTime value, JsonGenerator gen, SerializerProvider serializers) throws IOException { - gen.writeString(value.format(DateTimeFormatter.ISO_LOCAL_DATE_TIME)); + public void serialize(ZonedDateTime value, JsonGenerator gen, SerializerProvider serializers) throws IOException { + gen.writeString(value.format(DateTimeFormatter.ISO_ZONED_DATE_TIME)); } } - private static class LocalDateDeserializer extends JsonDeserializer { + private static class ZonedDateDeserializer extends JsonDeserializer { @Override - public LocalDateTime 
deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException { - return LocalDateTime.parse(p.readValueAs(String.class)); + public ZonedDateTime deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + return ZonedDateTime.parse(p.getValueAsString()); } } } diff --git a/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/utils/BenchmarkProcessorOutput.java b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/utils/BenchmarkProcessorOutput.java new file mode 100644 index 00000000000..902ac303473 --- /dev/null +++ b/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/utils/BenchmarkProcessorOutput.java @@ -0,0 +1,44 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.benchmark.utils; + +import com.fasterxml.jackson.annotation.JsonCreator; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.benchmark.stats.SdkBenchmarkResult; + +/** + * The output object of the benchmark processor. This contains the results of the all the benchmarks that were run, and the + * list of benchmarks that failed. 
+ */ +public final class BenchmarkProcessorOutput { + private final Map benchmarkResults; + private final List failedBenchmarks; + + @JsonCreator + public BenchmarkProcessorOutput(Map benchmarkResults, List failedBenchmarks) { + this.benchmarkResults = benchmarkResults; + this.failedBenchmarks = failedBenchmarks; + } + + public Map getBenchmarkResults() { + return benchmarkResults; + } + + public List getFailedBenchmarks() { + return failedBenchmarks; + } +} diff --git a/test/sdk-benchmarks/src/main/resources/software/amazon/awssdk/benchmark/baseline.json b/test/sdk-benchmarks/src/main/resources/software/amazon/awssdk/benchmark/baseline.json index c09d97fbbfc..85489ea9439 100644 --- a/test/sdk-benchmarks/src/main/resources/software/amazon/awssdk/benchmark/baseline.json +++ b/test/sdk-benchmarks/src/main/resources/software/amazon/awssdk/benchmark/baseline.json @@ -7,7 +7,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.308" + "date": "2020-03-18T20:11:42.308-07:00[America/Los_Angeles]" }, "statistics": { "mean": 11083.712145086858, @@ -18,7 +18,8 @@ "n": 10, "sum": 110837.12145086858 } - }, { + }, + { "id": "apicall.httpclient.async.NettyClientH1NonTlsBenchmark.sequentialApiCall-Throughput", "params": { "sdkVersion": "2.10.89", @@ -26,7 +27,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.314" + "date": "2020-03-18T20:11:42.314-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3133.078992847664, @@ -37,7 +38,8 @@ "n": 10, "sum": 31330.78992847664 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH1Benchmark.concurrentApiCall-Throughput-sslProviderValue-jdk", "params": { "sdkVersion": "2.10.89", @@ -45,7 +47,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.314" + "date": "2020-03-18T20:11:42.314-07:00[America/Los_Angeles]" 
}, "statistics": { "mean": 9400.788325804802, @@ -56,7 +58,8 @@ "n": 10, "sum": 94007.88325804802 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH1Benchmark.concurrentApiCall-Throughput-sslProviderValue-openssl", "params": { "sdkVersion": "2.10.89", @@ -64,7 +67,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.314" + "date": "2020-03-18T20:11:42.314-07:00[America/Los_Angeles]" }, "statistics": { "mean": 10081.234880927226, @@ -75,7 +78,8 @@ "n": 10, "sum": 100812.34880927225 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH1Benchmark.sequentialApiCall-Throughput-sslProviderValue-jdk", "params": { "sdkVersion": "2.10.89", @@ -83,7 +87,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2318.064309904416, @@ -94,7 +98,8 @@ "n": 10, "sum": 23180.64309904416 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH1Benchmark.sequentialApiCall-Throughput-sslProviderValue-openssl", "params": { "sdkVersion": "2.10.89", @@ -102,7 +107,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2668.2980888540214, @@ -113,7 +118,8 @@ "n": 10, "sum": 26682.980888540213 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH2Benchmark.concurrentApiCall-Throughput-sslProviderValue-jdk", "params": { "sdkVersion": "2.10.89", @@ -121,7 +127,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6452.047990499835, @@ -132,7 +138,8 @@ "n": 10, "sum": 64520.47990499835 } 
- }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH2Benchmark.concurrentApiCall-Throughput-sslProviderValue-openssl", "params": { "sdkVersion": "2.10.89", @@ -140,7 +147,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 7299.549654768969, @@ -151,7 +158,8 @@ "n": 10, "sum": 72995.49654768969 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH2Benchmark.sequentialApiCall-Throughput-sslProviderValue-jdk", "params": { "sdkVersion": "2.10.89", @@ -159,7 +167,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.315" + "date": "2020-03-18T20:11:42.315-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2253.2698214846414, @@ -170,7 +178,8 @@ "n": 10, "sum": 22532.698214846412 } - }, { + }, + { "id": "apicall.httpclient.async.NettyHttpClientH2Benchmark.sequentialApiCall-Throughput-sslProviderValue-openssl", "params": { "sdkVersion": "2.10.89", @@ -178,7 +187,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.316" + "date": "2020-03-18T20:11:42.316-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2349.62389971199, @@ -189,7 +198,8 @@ "n": 10, "sum": 23496.238997119897 } - }, { + }, + { "id": "apicall.httpclient.sync.ApacheHttpClientBenchmark.concurrentApiCall-Throughput", "params": { "sdkVersion": "2.10.89", @@ -197,7 +207,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.316" + "date": "2020-03-18T20:11:42.316-07:00[America/Los_Angeles]" }, "statistics": { "mean": 15097.57607845867, @@ -208,7 +218,8 @@ "n": 10, "sum": 150975.7607845867 } - }, { + }, + { "id": "apicall.httpclient.sync.ApacheHttpClientBenchmark.sequentialApiCall-Throughput", "params": { 
"sdkVersion": "2.10.89", @@ -216,7 +227,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.316" + "date": "2020-03-18T20:11:42.316-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3932.902248629381, @@ -227,7 +238,8 @@ "n": 10, "sum": 39329.02248629381 } - }, { + }, + { "id": "apicall.httpclient.sync.UrlConnectionHttpClientBenchmark.sequentialApiCall-Throughput", "params": { "sdkVersion": "2.10.89", @@ -235,7 +247,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.316" + "date": "2020-03-18T20:11:42.316-07:00[America/Los_Angeles]" }, "statistics": { "mean": 769.724367683772, @@ -246,7 +258,8 @@ "n": 10, "sum": 7697.24367683772 } - }, { + }, + { "id": "apicall.protocol.Ec2ProtocolBenchmark.successfulResponse-Throughput", "params": { "sdkVersion": "2.10.89", @@ -254,7 +267,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.317" + "date": "2020-03-18T20:11:42.317-07:00[America/Los_Angeles]" }, "statistics": { "mean": 9487.796808217518, @@ -265,7 +278,8 @@ "n": 10, "sum": 94877.96808217518 } - }, { + }, + { "id": "apicall.protocol.JsonProtocolBenchmark.successfulResponse-Throughput", "params": { "sdkVersion": "2.10.89", @@ -273,7 +287,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.317" + "date": "2020-03-18T20:11:42.317-07:00[America/Los_Angeles]" }, "statistics": { "mean": 15239.050304507653, @@ -284,7 +298,8 @@ "n": 10, "sum": 152390.50304507653 } - }, { + }, + { "id": "apicall.protocol.QueryProtocolBenchmark.successfulResponse-Throughput", "params": { "sdkVersion": "2.10.89", @@ -292,7 +307,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.317" + "date": 
"2020-03-18T20:11:42.317-07:00[America/Los_Angeles]" }, "statistics": { "mean": 10511.163793405529, @@ -303,7 +318,8 @@ "n": 10, "sum": 105111.63793405528 } - }, { + }, + { "id": "apicall.protocol.XmlProtocolBenchmark.successfulResponse-Throughput", "params": { "sdkVersion": "2.10.89", @@ -311,7 +327,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "Throughput", - "date": "2020-03-18T20:11:42.317" + "date": "2020-03-18T20:11:42.317-07:00[America/Los_Angeles]" }, "statistics": { "mean": 8484.220376124444, @@ -322,7 +338,8 @@ "n": 10, "sum": 84842.20376124444 } - }, { + }, + { "id": "coldstart.V2OptimizedClientCreationBenchmark.createClient-SampleTime", "params": { "sdkVersion": "2.10.89", @@ -330,7 +347,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.222-b10", "mode": "SampleTime", - "date": "2020-03-18T20:11:42.33" + "date": "2020-03-18T20:11:42.33-07:00[America/Los_Angeles]" }, "statistics": { "mean": 0.19604848685545748, @@ -341,7 +358,8 @@ "n": 771613, "sum": 151273.5610880001 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.enhanceGet-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -349,7 +367,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.779" + "date": "2020-03-26T21:54:38.779-07:00[America/Los_Angeles]" }, "statistics": { "mean": 21861.411294887475, @@ -360,7 +378,8 @@ "n": 10, "sum": 218614.11294887474 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.enhanceGet-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -368,7 +387,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.798" + "date": "2020-03-26T21:54:38.798-07:00[America/Los_Angeles]" }, "statistics": { "mean": 19194.404041731374, @@ -379,7 +398,8 @@ "n": 10, "sum": 191944.04041731375 } - }, { + }, + 
{ "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.enhanceGet-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -387,7 +407,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.801" + "date": "2020-03-26T21:54:38.801-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5742.760128972843, @@ -398,7 +418,8 @@ "n": 10, "sum": 57427.60128972843 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.enhanceGet-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -406,7 +427,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.803" + "date": "2020-03-26T21:54:38.803-07:00[America/Los_Angeles]" }, "statistics": { "mean": 9123.68471587034, @@ -417,7 +438,8 @@ "n": 10, "sum": 91236.8471587034 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.lowLevelGet-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -425,7 +447,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.805" + "date": "2020-03-26T21:54:38.805-07:00[America/Los_Angeles]" }, "statistics": { "mean": 23727.653183389055, @@ -436,7 +458,8 @@ "n": 10, "sum": 237276.53183389056 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.lowLevelGet-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -444,7 +467,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.807" + "date": "2020-03-26T21:54:38.807-07:00[America/Los_Angeles]" }, "statistics": { "mean": 21204.570979007094, @@ -455,7 +478,8 @@ "n": 10, "sum": 212045.70979007095 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.lowLevelGet-Throughput-testItem-HUGE", "params": { 
"sdkVersion": "2.11.2-SNAPSHOT", @@ -463,7 +487,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.809" + "date": "2020-03-26T21:54:38.809-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6631.846341687633, @@ -474,7 +498,8 @@ "n": 10, "sum": 66318.46341687633 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetOverheadBenchmark.lowLevelGet-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -482,7 +507,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.811" + "date": "2020-03-26T21:54:38.811-07:00[America/Los_Angeles]" }, "statistics": { "mean": 10065.700621509586, @@ -493,7 +518,8 @@ "n": 10, "sum": 100657.00621509585 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.enhancedPut-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -501,7 +527,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.813" + "date": "2020-03-26T21:54:38.813-07:00[America/Los_Angeles]" }, "statistics": { "mean": 23635.986227776833, @@ -512,7 +538,8 @@ "n": 10, "sum": 236359.86227776835 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.enhancedPut-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -520,7 +547,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.816" + "date": "2020-03-26T21:54:38.816-07:00[America/Los_Angeles]" }, "statistics": { "mean": 20950.69006280451, @@ -531,7 +558,8 @@ "n": 10, "sum": 209506.9006280451 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.enhancedPut-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -539,7 +567,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": 
"25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.818" + "date": "2020-03-26T21:54:38.818-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6947.0547317414, @@ -550,7 +578,8 @@ "n": 10, "sum": 69470.547317414 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.enhancedPut-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -558,7 +587,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.819" + "date": "2020-03-26T21:54:38.819-07:00[America/Los_Angeles]" }, "statistics": { "mean": 9651.438384939946, @@ -569,7 +598,8 @@ "n": 10, "sum": 96514.38384939946 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.lowLevelPut-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -577,7 +607,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.821" + "date": "2020-03-26T21:54:38.821-07:00[America/Los_Angeles]" }, "statistics": { "mean": 24474.133695525416, @@ -588,7 +618,8 @@ "n": 10, "sum": 244741.33695525417 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.lowLevelPut-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -596,7 +627,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.823" + "date": "2020-03-26T21:54:38.823-07:00[America/Los_Angeles]" }, "statistics": { "mean": 21708.256095745754, @@ -607,7 +638,8 @@ "n": 10, "sum": 217082.56095745755 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.lowLevelPut-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -615,7 +647,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.824" + "date": 
"2020-03-26T21:54:38.824-07:00[America/Los_Angeles]" }, "statistics": { "mean": 7831.76449879679, @@ -626,7 +658,8 @@ "n": 10, "sum": 78317.6449879679 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutOverheadBenchmark.lowLevelPut-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -634,7 +667,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.826" + "date": "2020-03-26T21:54:38.826-07:00[America/Los_Angeles]" }, "statistics": { "mean": 10432.187037993292, @@ -645,7 +678,8 @@ "n": 10, "sum": 104321.87037993292 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Get-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -653,18 +687,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.83" + "date": "2020-03-26T21:54:38.83-07:00[America/Los_Angeles]" }, "statistics": { "mean": 4216269.465030504, - "variance": 7577381680.455024, + "variance": 7.577381680455024E9, "standardDeviation": 87048.1572490482, "max": 4304995.187978772, "min": 4127750.465031905, "n": 10, - "sum": 42162694.65030504 + "sum": 4.216269465030504E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Get-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -672,18 +707,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.831" + "date": "2020-03-26T21:54:38.831-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2548116.917228338, - "variance": 39596645.65844414, + "variance": 3.959664565844414E7, "standardDeviation": 6292.586563444649, "max": 2553688.8961462937, "min": 2536667.0775304707, "n": 10, - "sum": 25481169.172283377 + "sum": 2.5481169172283377E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Get-Throughput-testItem-HUGE", 
"params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -691,7 +727,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.833" + "date": "2020-03-26T21:54:38.833-07:00[America/Los_Angeles]" }, "statistics": { "mean": 271517.73760595697, @@ -702,7 +738,8 @@ "n": 10, "sum": 2715177.37605957 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Get-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -710,18 +747,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.835" + "date": "2020-03-26T21:54:38.835-07:00[America/Los_Angeles]" }, "statistics": { "mean": 347920.5003151236, - "variance": 595046205.154461, + "variance": 5.95046205154461E8, "standardDeviation": 24393.56893024186, "max": 371195.38010237005, "min": 324573.14439857507, "n": 10, "sum": 3479205.003151236 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Put-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -729,18 +767,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.836" + "date": "2020-03-26T21:54:38.836-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1768330.9260150697, - "variance": 1939469873.1268594, + "variance": 1.9394698731268594E9, "standardDeviation": 44039.412724590904, "max": 1811149.0295745614, "min": 1724510.13860136, "n": 10, - "sum": 17683309.260150697 + "sum": 1.7683309260150697E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Put-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -748,18 +787,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.838" + "date": "2020-03-26T21:54:38.838-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1164870.7035331992, 
- "variance": 52647682.835541315, + "variance": 5.2647682835541315E7, "standardDeviation": 7255.8723001126, "max": 1174402.0970783627, "min": 1155823.7219991074, "n": 10, - "sum": 11648707.035331992 + "sum": 1.1648707035331992E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Put-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -767,7 +807,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.839" + "date": "2020-03-26T21:54:38.839-07:00[America/Los_Angeles]" }, "statistics": { "mean": 210052.38869182704, @@ -778,7 +818,8 @@ "n": 10, "sum": 2100523.8869182705 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v1Put-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -786,7 +827,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.841" + "date": "2020-03-26T21:54:38.841-07:00[America/Los_Angeles]" }, "statistics": { "mean": 114978.94801995096, @@ -797,7 +838,8 @@ "n": 10, "sum": 1149789.4801995095 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Get-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -805,18 +847,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.842" + "date": "2020-03-26T21:54:38.842-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3662576.5648506857, - "variance": 7409482642.244276, + "variance": 7.409482642244276E9, "standardDeviation": 86078.35176305525, "max": 3748802.8826729953, "min": 3575363.8664258076, "n": 10, - "sum": 36625765.64850686 + "sum": 3.662576564850686E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Get-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -824,18 +867,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", 
"jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.844" + "date": "2020-03-26T21:54:38.844-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1646303.6313321758, - "variance": 1574901387.9625113, + "variance": 1.5749013879625113E9, "standardDeviation": 39685.027251628686, "max": 1686779.9815694, "min": 1607103.7820484997, "n": 10, - "sum": 16463036.313321758 + "sum": 1.6463036313321758E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Get-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -843,7 +887,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.845" + "date": "2020-03-26T21:54:38.845-07:00[America/Los_Angeles]" }, "statistics": { "mean": 129737.87890043444, @@ -854,7 +898,8 @@ "n": 10, "sum": 1297378.7890043445 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Get-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -862,7 +907,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.847" + "date": "2020-03-26T21:54:38.847-07:00[America/Los_Angeles]" }, "statistics": { "mean": 276472.2259425583, @@ -873,7 +918,8 @@ "n": 10, "sum": 2764722.2594255833 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Put-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -881,18 +927,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.848" + "date": "2020-03-26T21:54:38.848-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3971820.2835967117, - "variance": 17460612994.02266, + "variance": 1.746061299402266E10, "standardDeviation": 132138.61280497332, "max": 4108330.055204355, "min": 3840104.0305961887, "n": 10, - "sum": 39718202.835967116 + "sum": 3.9718202835967116E7 } - }, { + }, + 
{ "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Put-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -900,7 +947,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.849" + "date": "2020-03-26T21:54:38.849-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1493615.511958485, @@ -909,9 +956,10 @@ "max": 1498786.274708349, "min": 1488510.4353162742, "n": 10, - "sum": 14936155.11958485 + "sum": 1.493615511958485E7 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Put-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -919,7 +967,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.851" + "date": "2020-03-26T21:54:38.851-07:00[America/Los_Angeles]" }, "statistics": { "mean": 119057.84161286886, @@ -930,7 +978,8 @@ "n": 10, "sum": 1190578.4161286885 } - }, { + }, + { "id": "enhanced.dynamodb.V1MapperComparisonBenchmark.v2Put-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.2-SNAPSHOT", @@ -938,18 +987,19 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-26T21:54:38.852" + "date": "2020-03-26T21:54:38.852-07:00[America/Los_Angeles]" }, "statistics": { "mean": 146022.84478369894, - "variance": 327890156.19659877, + "variance": 3.2789015619659877E8, "standardDeviation": 18107.737467629653, "max": 163566.06331238395, "min": 128721.90017507998, "n": 10, "sum": 1460228.4478369893 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v1Delete-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -957,7 +1007,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.606" + "date": "2020-03-31T20:56:25.606-07:00[America/Los_Angeles]" }, "statistics": { 
"mean": 6829142.946589122, @@ -968,7 +1018,8 @@ "n": 10, "sum": 6.829142946589121E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v1Delete-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -976,7 +1027,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.624" + "date": "2020-03-31T20:56:25.624-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6808359.910172634, @@ -987,7 +1038,8 @@ "n": 10, "sum": 6.808359910172634E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v1Delete-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -995,7 +1047,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.625" + "date": "2020-03-31T20:56:25.625-07:00[America/Los_Angeles]" }, "statistics": { "mean": 6760046.190670421, @@ -1006,7 +1058,8 @@ "n": 10, "sum": 6.760046190670422E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v1Delete-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1014,7 +1067,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.625" + "date": "2020-03-31T20:56:25.625-07:00[America/Los_Angeles]" }, "statistics": { "mean": 7063555.657198062, @@ -1025,7 +1078,8 @@ "n": 10, "sum": 7.063555657198063E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v2Delete-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1033,7 +1087,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.625" + "date": "2020-03-31T20:56:25.625-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5218929.153482059, @@ -1044,7 +1098,8 @@ "n": 
10, "sum": 5.2189291534820594E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v2Delete-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1052,7 +1107,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.626" + "date": "2020-03-31T20:56:25.626-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5269447.416256654, @@ -1063,7 +1118,8 @@ "n": 10, "sum": 5.269447416256654E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v2Delete-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1071,7 +1127,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.626" + "date": "2020-03-31T20:56:25.626-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5233493.1041884385, @@ -1082,7 +1138,8 @@ "n": 10, "sum": 5.2334931041884385E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientDeleteV1MapperComparisonBenchmark.v2Delete-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1090,7 +1147,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.626" + "date": "2020-03-31T20:56:25.626-07:00[America/Los_Angeles]" }, "statistics": { "mean": 5333879.183913028, @@ -1101,7 +1158,8 @@ "n": 10, "sum": 5.333879183913028E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v1Get-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1109,7 +1167,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 4254457.087292329, @@ -1120,7 +1178,8 @@ "n": 10, "sum": 4.254457087292329E7 } - }, { + }, + { 
"id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v1Get-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1128,7 +1187,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2541003.3761009574, @@ -1139,7 +1198,8 @@ "n": 10, "sum": 2.5410033761009574E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v1Get-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1147,7 +1207,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 276072.8892665714, @@ -1158,7 +1218,8 @@ "n": 10, "sum": 2760728.8926657136 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v1Get-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1166,7 +1227,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 334086.66986329446, @@ -1177,7 +1238,8 @@ "n": 10, "sum": 3340866.6986329444 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v2Get-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1185,7 +1247,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.632" + "date": "2020-03-31T20:56:25.632-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3668470.1459464533, @@ -1196,7 +1258,8 @@ "n": 10, "sum": 3.6684701459464535E7 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v2Get-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1204,7 +1267,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.633" + "date": "2020-03-31T20:56:25.633-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1705518.9560612484, @@ -1215,7 +1278,8 @@ "n": 10, "sum": 1.7055189560612485E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v2Get-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1223,7 +1287,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.633" + "date": "2020-03-31T20:56:25.633-07:00[America/Los_Angeles]" }, "statistics": { "mean": 136996.69293126452, @@ -1234,7 +1298,8 @@ "n": 10, "sum": 1369966.9293126452 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientGetV1MapperComparisonBenchmark.v2Get-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1242,18 +1307,20 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.633" + "date": "2020-03-31T20:56:25.633-07:00[America/Los_Angeles]" }, "statistics": { "mean": 283351.0162156861, "variance": 1.0298588244596072E7, "standardDeviation": 3209.1413562814696, "max": 286589.6531841922, - "min": 280136.4515638473, + "min": 280136.45 + 15638473, "n": 10, "sum": 2833510.1621568613 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v1Put-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1261,7 +1328,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.636" + "date": "2020-03-31T20:56:25.636-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3989391.9219655544, @@ -1272,7 
+1339,8 @@ "n": 10, "sum": 3.989391921965554E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v1Put-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1280,7 +1348,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.637" + "date": "2020-03-31T20:56:25.637-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2171253.7675951715, @@ -1291,7 +1359,8 @@ "n": 10, "sum": 2.1712537675951716E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v1Put-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1299,7 +1368,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.637" + "date": "2020-03-31T20:56:25.637-07:00[America/Los_Angeles]" }, "statistics": { "mean": 244529.021162057, @@ -1310,7 +1379,8 @@ "n": 10, "sum": 2445290.21162057 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v1Put-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1318,7 +1388,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.637" + "date": "2020-03-31T20:56:25.637-07:00[America/Los_Angeles]" }, "statistics": { "mean": 176271.52763779167, @@ -1329,7 +1399,8 @@ "n": 10, "sum": 1762715.2763779168 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v2Put-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1337,7 +1408,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.637" + "date": "2020-03-31T20:56:25.637-07:00[America/Los_Angeles]" }, "statistics": { "mean": 3980473.869357331, @@ -1348,7 +1419,8 @@ "n": 10, "sum": 3.980473869357331E7 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v2Put-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1356,7 +1428,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1537572.9568381808, @@ -1367,7 +1439,8 @@ "n": 10, "sum": 1.5375729568381809E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v2Put-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1375,7 +1448,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 122705.30057333391, @@ -1386,7 +1459,8 @@ "n": 10, "sum": 1227053.005733339 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientPutV1MapperComparisonBenchmark.v2Put-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1394,7 +1468,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 149741.39277360524, @@ -1405,7 +1479,8 @@ "n": 10, "sum": 1497413.9277360525 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v1Query-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1413,7 +1488,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1252512.7584237454, @@ -1424,7 +1499,8 @@ "n": 10, "sum": 1.2525127584237454E7 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v1Query-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1432,7 +1508,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 696709.8287589755, @@ -1443,7 +1519,8 @@ "n": 10, "sum": 6967098.287589755 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v1Query-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1451,7 +1528,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.638" + "date": "2020-03-31T20:56:25.638-07:00[America/Los_Angeles]" }, "statistics": { "mean": 87840.49274328267, @@ -1462,7 +1539,8 @@ "n": 10, "sum": 878404.9274328267 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v1Query-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1470,7 +1548,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 98567.58634308925, @@ -1481,7 +1559,8 @@ "n": 10, "sum": 985675.8634308925 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v2Query-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1489,7 +1568,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 559019.8433378737, @@ -1500,7 +1579,8 @@ "n": 10, "sum": 5590198.433378737 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v2Query-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1508,7 +1588,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 333270.58140860766, @@ -1519,7 +1599,8 @@ "n": 10, "sum": 3332705.8140860763 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v2Query-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1527,7 +1608,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 37840.02739826469, @@ -1538,7 +1619,8 @@ "n": 10, "sum": 378400.2739826469 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientQueryV1MapperComparisonBenchmark.v2Query-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1546,7 +1628,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 87298.81206391682, @@ -1557,7 +1639,8 @@ "n": 10, "sum": 872988.1206391682 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v1Scan-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1565,7 +1648,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.639" + "date": "2020-03-31T20:56:25.639-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1816952.3977204207, @@ -1576,7 +1659,8 @@ "n": 10, "sum": 1.8169523977204207E7 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v1Scan-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1584,7 +1668,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 967205.4743976349, @@ -1595,7 +1679,8 @@ "n": 10, "sum": 9672054.743976349 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v1Scan-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1603,7 +1688,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 91879.33897708468, @@ -1614,7 +1699,8 @@ "n": 10, "sum": 918793.3897708468 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v1Scan-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1622,7 +1708,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 116637.4994496019, @@ -1633,7 +1719,8 @@ "n": 10, "sum": 1166374.994496019 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v2Scan-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1641,7 +1728,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2410421.1020664377, @@ -1652,7 +1739,8 @@ "n": 10, "sum": 2.410421102066438E7 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v2Scan-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1660,7 +1748,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 750074.5784853816, @@ -1671,7 +1759,8 @@ "n": 10, "sum": 7500745.784853815 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v2Scan-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1679,7 +1768,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.64" + "date": "2020-03-31T20:56:25.64-07:00[America/Los_Angeles]" }, "statistics": { "mean": 42561.66188409884, @@ -1690,7 +1779,8 @@ "n": 10, "sum": 425616.6188409884 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientScanV1MapperComparisonBenchmark.v2Scan-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1698,7 +1788,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 96898.57710736312, @@ -1709,7 +1799,8 @@ "n": 10, "sum": 968985.7710736311 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v1Update-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1717,7 +1808,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 2661602.8279100014, @@ -1728,7 +1819,8 @@ "n": 10, "sum": 2.6616028279100016E7 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v1Update-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1736,7 +1828,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1507250.535625762, @@ -1747,7 +1839,8 @@ "n": 10, "sum": 1.5072505356257621E7 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v1Update-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1755,7 +1848,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 219532.91136539596, @@ -1766,7 +1859,8 @@ "n": 10, "sum": 2195329.1136539597 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v1Update-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1774,7 +1868,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 145165.8982342992, @@ -1785,7 +1879,8 @@ "n": 10, "sum": 1451658.982342992 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v2Update-Throughput-testItem-TINY", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1793,7 +1888,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.641" + "date": "2020-03-31T20:56:25.641-07:00[America/Los_Angeles]" }, "statistics": { "mean": 1335244.2925506658, @@ -1804,7 +1899,8 @@ "n": 10, "sum": 1.3352442925506659E7 } - }, { + }, + { "id": 
"enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v2Update-Throughput-testItem-SMALL", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1812,7 +1908,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.642" + "date": "2020-03-31T20:56:25.642-07:00[America/Los_Angeles]" }, "statistics": { "mean": 233466.6326390164, @@ -1823,7 +1919,8 @@ "n": 10, "sum": 2334666.326390164 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v2Update-Throughput-testItem-HUGE", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1831,7 +1928,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.642" + "date": "2020-03-31T20:56:25.642-07:00[America/Los_Angeles]" }, "statistics": { "mean": 38060.01754411877, @@ -1842,7 +1939,8 @@ "n": 10, "sum": 380600.1754411877 } - }, { + }, + { "id": "enhanced.dynamodb.EnhancedClientUpdateV1MapperComparisonBenchmark.v2Update-Throughput-testItem-HUGE_FLAT", "params": { "sdkVersion": "2.11.5-SNAPSHOT", @@ -1850,7 +1948,7 @@ "jvmName": "OpenJDK 64-Bit Server VM", "jvmVersion": "25.242-b08", "mode": "Throughput", - "date": "2020-03-31T20:56:25.642" + "date": "2020-03-31T20:56:25.642-07:00[America/Los_Angeles]" }, "statistics": { "mean": 16831.633221796015, @@ -1862,4 +1960,4 @@ "sum": 168316.33221796015 } } -] +] \ No newline at end of file diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml index 6bb7fa84e60..8586952d9ba 100644 --- a/test/sdk-native-image-test/pom.xml +++ b/test/sdk-native-image-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index b52f17756fd..91e623dd8ae 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index ec2f1e98237..b0b308ed35d 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/test-utils/pom.xml b/test/test-utils/pom.xml index 573198abefa..5bbf15f5f05 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 254fa7d9daf..4454c563ba8 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/third-party/pom.xml b/third-party/pom.xml index c2c6e2bb5c5..5f9e17a787e 100644 --- a/third-party/pom.xml +++ b/third-party/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT third-party diff --git a/third-party/third-party-jackson-core/pom.xml b/third-party/third-party-jackson-core/pom.xml index c5da43f6db3..9f85ed6fc21 100644 --- a/third-party/third-party-jackson-core/pom.xml +++ b/third-party/third-party-jackson-core/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/third-party/third-party-jackson-dataformat-cbor/pom.xml b/third-party/third-party-jackson-dataformat-cbor/pom.xml index facc935202e..2a196ff3ddd 100644 --- a/third-party/third-party-jackson-dataformat-cbor/pom.xml +++ b/third-party/third-party-jackson-dataformat-cbor/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.20.126-SNAPSHOT + 
2.20.138-SNAPSHOT 4.0.0 diff --git a/utils/pom.xml b/utils/pom.xml index 87783b1a474..1452373102f 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.20.126-SNAPSHOT + 2.20.138-SNAPSHOT 4.0.0 diff --git a/utils/src/main/java/software/amazon/awssdk/utils/Validate.java b/utils/src/main/java/software/amazon/awssdk/utils/Validate.java index 7890c3ee14c..6941ad9a252 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/Validate.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/Validate.java @@ -656,6 +656,19 @@ public static int isNotNegative(int num, String fieldName) { return num; } + public static Long isNotNegativeOrNull(Long num, String fieldName) { + + if (num == null) { + return null; + } + + if (num < 0) { + throw new IllegalArgumentException(String.format("%s must not be negative", fieldName)); + } + + return num; + } + public static long isNotNegative(long num, String fieldName) { if (num < 0) { diff --git a/utils/src/test/java/software/amazon/awssdk/utils/ValidateTest.java b/utils/src/test/java/software/amazon/awssdk/utils/ValidateTest.java index 2983398f83d..29bc80edbe8 100644 --- a/utils/src/test/java/software/amazon/awssdk/utils/ValidateTest.java +++ b/utils/src/test/java/software/amazon/awssdk/utils/ValidateTest.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.utils; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; @@ -610,6 +611,19 @@ public void isNull_notNull_shouldThrow() { Validate.isNull("string", "not null"); } + @Test + public void isNotNegativeOrNull_negative_throws() { + expected.expect(IllegalArgumentException.class); + expected.expectMessage("foo"); + Validate.isNotNegativeOrNull(-1L, "foo"); + } + + @Test + public void isNotNegativeOrNull_notNegative_notThrow() { + assertThat(Validate.isNotNegativeOrNull(5L, 
"foo")).isEqualTo(5L); + assertThat(Validate.isNotNegativeOrNull(0L, "foo")).isEqualTo(0L); + } + @Test public void isNull_null_shouldPass() { Validate.isNull(null, "not null");